Merge V8 at 3.7.12.28

Bug: 5688872

Change-Id: Iddb40cae44d51a2b449f2858951e0472771f5981
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 20a796a..f00f119 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -48,6 +48,7 @@
 	src/hydrogen.cc \
 	src/hydrogen-instructions.cc \
 	src/ic.cc \
+	src/incremental-marking.cc \
 	src/interpreter-irregexp.cc \
 	src/isolate.cc \
 	src/jsregexp.cc \
@@ -79,6 +80,7 @@
 	src/serialize.cc \
 	src/snapshot-common.cc \
 	src/spaces.cc \
+	src/store-buffer.cc \
 	src/string-search.cc \
 	src/string-stream.cc \
 	src/strtod.cc \
@@ -175,6 +177,6 @@
 	src/macros.py
 
 V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := \
-	src/proxy.js \
-	src/weakmap.js
+	src/collection.js \
+	src/proxy.js
 
diff --git a/ChangeLog b/ChangeLog
index 99495dd..4d62981 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,231 @@
+2011-12-01: Version 3.7.12
+
+        Increase tick interval for the Android platform.
+
+        Fix a bug in the register allocator. (chromium:105112)
+
+        Fix handling of recompiling code. (chromium:105375, v8:1782)
+
+        Start incremental marking on idle notification. (v8:1458)
+
+        Build fixes for various platforms.
+
+        Various performance improvements.
+
+
+2011-11-29: Version 3.7.11
+
+        Fixed bug when generating padding to ensure space for lazy
+        deoptimization.
+        (issue 1846)
+
+        Further reduced pause times due to GC.
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-23: Version 3.7.10
+
+        Set maximum length of FixedArray in terms of elements instead of
+        an absolute number of bytes.
+        (Chromium issue 103103)
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-21: Version 3.7.9
+
+        Removed exit-time destructors.
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-17: Version 3.7.8
+
+        Removed hidden prototype from builtins, i.e., deleting an overridden
+        function on builtins will not make the original function reappear.
+
+        Added NetBSD support for scons build.
+
+        Performance improvements on all platforms.
+
+
+2011-11-14: Version 3.7.7
+
+        Fix missing fast property accessors in heap snapshots.
+        (issue 1818)
+
+
+2011-11-11: Version 3.7.6
+
+        Fixed filtering of store buffer for large object pages.
+        (issue 1817)
+
+        Fixed generated hash function on all platforms.
+        (issue 1808)
+
+        Fixed Heap::Shrink to ensure that it does not free pages that are
+        still in use.
+        (Chromium issue 100414)
+
+        Stability and performance improvements on all platforms.
+
+
+2011-11-10: Version 3.7.5
+
+        Added initial gyp infrastructure for MIPS.
+
+        Implemented performance improvements to the incremental garbage
+        collector.
+
+        Added optimizations and stability improvements on all platforms.
+
+
+2011-11-07: Version 3.7.4
+
+        Proper "libv8.so.3.7.4" SONAME for Linux shared library (issue 1786).
+
+        Fix Harmony sets and maps to allow null and undefined as keys
+        (still hidden behind --harmony flag) (issue 1622).
+
+        Implement VirtualMemory on FreeBSD to fix build (issue 1807).
+
+        Enable VFP instructions for Android.
+
+        Fix error handling in Date.prototype.toISOString (issue 1792).
+
+        Bug fixes and performance improvements for all platforms.
+
+        Not officially supported but noteworthy: Crankshaft for MIPS :-)
+
+
+2011-10-28: Version 3.7.3
+
+        Slight deoptimization as a workaround for issue with jslint: Issue
+        1789.
+
+
+2011-10-27: Version 3.7.2
+
+        Fix bug in deoptimization.  Known issue with jslint: Issue 1789.
+
+
+2011-10-26: Version 3.7.1
+
+        Achieved 33% speedup in debug-mode tests.
+
+        Removed special casing of calls to RegExp test and exec methods with no
+        argument.  Now matches new JSC behaviour.  crbug.com/75740.
+
+        Return the empty string on cyclic references in toString (ES5
+        conformance).
+
+        Fixed bug triggered by JSBeautifier.  crbug.com/100409.
+
+        Made Math.random state per-context instead of per-process (issue 864).
+
+        Fixed stack traces to skip native functions.
+
+        Make snapshots (new contexts) smaller and faster.
+
+        Fixed handling of Function.apply for non-array arguments.
+
+        Fixed evaluation order in defineProperties to match Firefox.
+
+        Fixed handling of non-object receivers for array builtins,
+        crbug.com/100702.
+
+        Multiple fixes to improve compliance with test262.
+
+        Fixed compatibility with older Android releases.
+
+        Fixed compilation with gcc-4.5.3.
+
+        Improved performance of WriteUtf8, issue 1665.
+
+        Made native syntax an early error in the preparser.
+
+        Fixed issues 793 and 893 relating to Function.prototype.bind.
+
+        Improved let, const, Set and Map support and other Harmony features
+        (behind the --harmony flag).
+
+        Changed evaluation order for > and <= to match ES5 instead of ES3.
+
+        Bug fixes and performance improvements on all platforms.
+
+
+2011-10-13: Version 3.7.0
+
+        Fixed array handling for Object.defineOwnProperty (ES5 conformance).
+
+        Fixed issue 1757 (string slices of external strings).
+
+        Fixed issue 1759 (ARM).
+
+        Added flag --noclever-optimizations to disable some things that
+        caused trouble in the past.
+
+        Added flag --stress-compaction for testing.
+
+        Added flag --harmony to activate all experimental Harmony features.
+
+
+2011-10-10: Version 3.6.6
+
+        Added a GC pause visualization tool.
+
+        Added presubmit=no and werror=no flags to Makefile.
+
+        ES5/Test262 conformance improvements.
+
+        Fixed compilation issues with GCC 4.5.x (issue 1743).
+
+        Bug fixes and performance improvements on all platforms.
+
+
+2011-10-05: Version 3.6.5
+
+        New incremental garbage collector.
+
+        Removed the hard heap size limit (soft heap size limit is still
+        700/1400Mbytes by default).
+
+        Implemented ES5 generic Array.prototype.toString (Issue 1361).
+
+        V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
+
+        Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
+
+        Fixed propertyIsEnumerable for numeric properties (Issue 1692).
+
+        Fixed the MinGW and Windows 2000 builds.
+
+        Fixed "Prototype chain is not searched if named property handler does
+        not set a property" (Issue 1636).
+
+        Made the RegExp.prototype object be a RegExp object (Issue 1217).
+
+        Disallowed future reserved words as labels in strict mode.
+
+        Fixed string split to correctly coerce the separator to a string
+        (Issue 1711).
+
+        API: Added an optional source length field to the Extension
+        constructor.
+
+        API: Added Debug::DisableAgent to match existing Debug::EnableAgent
+        (Issue 1573).
+
+        Added "native" target to Makefile for the benefit of Linux distros.
+
+        Fixed: debugger stops stepping outside evaluate (Issue 1639).
+
+        More work on ES-Harmony proxies.  Still hidden behind a flag.
+
+        Bug fixes and performance improvements on all platforms.
+
+
 2011-09-15: Version 3.6.4
 
         Fixed d8's broken readline history.
@@ -194,7 +422,7 @@
 
         Fix the debugger for strict-mode functions. (Chromium issue 89236)
 
-        Add GetPropertyAttribute method for Object in the API. (Patch by 
+        Add GetPropertyAttribute method for Object in the API. (Patch by
         Peter Varga)
 
         Fix -Wunused-but-set-variable for gcc-4.6 on x64. (Issue 1291)
diff --git a/Makefile b/Makefile
index a7b2731..09d1e8b 100644
--- a/Makefile
+++ b/Makefile
@@ -27,11 +27,12 @@
 
 
 # Variable default definitions. Override them by exporting them in your shell.
-CXX ?= "g++"  # For distcc: export CXX="distcc g++"
-LINK ?= "g++"
+CXX ?= g++
+LINK ?= g++
 OUTDIR ?= out
 TESTJOBS ?= -j16
 GYPFLAGS ?=
+TESTFLAGS ?=
 
 # Special build flags. Use them like this: "make library=shared"
 
@@ -50,6 +51,10 @@
 ifeq ($(disassembler), on)
   GYPFLAGS += -Dv8_enable_disassembler=1
 endif
+# objectprint=on
+ifeq ($(objectprint), on)
+  GYPFLAGS += -Dv8_object_print=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
@@ -72,14 +77,23 @@
 ifdef soname_version
   GYPFLAGS += -Dsoname_version=$(soname_version)
 endif
+# werror=no
+ifeq ($(werror), no)
+  GYPFLAGS += -Dwerror=''
+endif
+# presubmit=no
+ifeq ($(presubmit), no)
+  TESTFLAGS += --no-presubmit
+endif
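+# For example, a permissive local run that tolerates compiler warnings
+# and skips the presubmit step could be invoked as (one possibility):
+#   make werror=no presubmit=no ia32.release.check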
 
 # ----------------- available targets: --------------------
 # - "dependencies": pulls in external dependencies (currently: GYP)
 # - any arch listed in ARCHES (see below)
 # - any mode listed in MODES
 # - every combination <arch>.<mode>, e.g. "ia32.release"
+# - "native": current host's architecture, release mode
 # - any of the above with .check appended, e.g. "ia32.release.check"
-# - default (no target specified): build all ARCHES and MODES
+# - default (no target specified): build all DEFAULT_ARCHES and MODES
 # - "check": build all targets and run all tests
 # - "<arch>.clean" for any <arch> in ARCHES
 # - "clean": clean all ARCHES
@@ -88,7 +102,8 @@
 
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm
+ARCHES = ia32 x64 arm mips
+DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug
 
 # List of files that trigger Makefile regeneration:
@@ -103,7 +118,7 @@
 # File where previously used GYPFLAGS are stored.
 ENVFILE = $(OUTDIR)/environment
 
-.PHONY: all check clean dependencies $(ENVFILE).new \
+.PHONY: all check clean dependencies $(ENVFILE).new native \
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
         $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
 
@@ -112,7 +127,7 @@
 
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
-$(MODES): $(addsuffix .$$@,$(ARCHES))
+$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
 
 $(ARCHES): $(addprefix $$@.,$(MODES))
 
@@ -124,21 +139,32 @@
 	                     python -c "print raw_input().capitalize()") \
 	         builddir="$(shell pwd)/$(OUTDIR)/$@"
 
+native: $(OUTDIR)/Makefile-native
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
+	         CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
+	         builddir="$(shell pwd)/$(OUTDIR)/$@"
+
 # Test targets.
 check: all
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \
+	    $(TESTFLAGS)
 
 $(addsuffix .check,$(MODES)): $$(basename $$@)
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
-	    --mode=$(basename $@)
+	    --mode=$(basename $@) $(TESTFLAGS)
 
 $(addsuffix .check,$(ARCHES)): $$(basename $$@)
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
-	    --arch=$(basename $@)
+	    --arch=$(basename $@) $(TESTFLAGS)
 
 $(CHECKS): $$(basename $$@)
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
-	    --arch-and-mode=$(basename $@)
+	    --arch-and-mode=$(basename $@) $(TESTFLAGS)
+
+native.check: native
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
+	    --arch-and-mode=. $(TESTFLAGS)
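+
+# A typical invocation is "make native.check", which first builds the
+# host architecture in release mode (via the "native" target above) and
+# then runs the test suite against that build.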
 
 # Clean targets. You can clean each architecture individually, or everything.
 $(addsuffix .clean,$(ARCHES)):
@@ -147,7 +173,12 @@
 	rm -rf $(OUTDIR)/$(basename $@).debug
 	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
 
-clean: $(addsuffix .clean,$(ARCHES))
+native.clean:
+	rm -f $(OUTDIR)/Makefile-native
+	rm -rf $(OUTDIR)/native
+	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
+
+clean: $(addsuffix .clean,$(ARCHES)) native.clean
 
 # GYP file generation targets.
 $(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@@ -160,11 +191,20 @@
 	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
 	              -S-x64 $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
+$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
 	              -S-arm $(GYPFLAGS)
 
+$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
+	              -S-mips $(GYPFLAGS)
+
+$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
+
 # Replaces the old with the new environment file if they're different, which
 # will trigger GYP to regenerate Makefiles.
 $(ENVFILE): $(ENVFILE).new
diff --git a/SConstruct b/SConstruct
index f9c33ca..3f12907 100644
--- a/SConstruct
+++ b/SConstruct
@@ -129,6 +129,10 @@
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
     },
+    'os:netbsd': {
+      'CPPPATH' : ['/usr/pkg/include'],
+      'LIBPATH' : ['/usr/pkg/lib'],
+    },
     'os:win32': {
       'CCFLAGS':      ['-DWIN32'],
       'CXXFLAGS':     ['-DWIN32'],
@@ -364,6 +368,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
+    'os:netbsd': {
+      'LIBS': ['execinfo', 'pthread']
+    },
     'compress_startup_data:bz2': {
       'os:linux': {
         'LIBS': ['bz2']
@@ -428,6 +435,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32']
     },
+    'os:netbsd': {
+      'LIBS':         ['execinfo', 'pthread']
+    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS
     },
@@ -487,6 +497,10 @@
     'os:win32': {
       'LIBS':         ['winmm', 'ws2_32']
     },
+    'os:netbsd': {
+      'LIBPATH' :     ['/usr/pkg/lib'],
+      'LIBS':         ['execinfo', 'pthread']
+    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS,
       'armeabi:soft' : {
@@ -818,6 +832,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
+    'os:netbsd': {
+      'LIBS': ['pthread'],
+    },
     'arch:arm': {
       'LINKFLAGS':   ARM_LINK_FLAGS
     },
@@ -951,7 +968,7 @@
     'help': 'the architecture to build for'
   },
   'os': {
-    'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin'],
+    'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin', 'netbsd'],
     'guess': GuessOS,
     'help': 'the os to build for'
   },
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index bdd9c2b..915b32f 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,2 +1,2 @@
-V8 3.6.6.19
-http://v8.googlecode.com/svn/branches/3.6@10446
+V8 3.7.12.28
+http://v8.googlecode.com/svn/branches/3.7@10897
diff --git a/benchmarks/spinning-balls/index.html b/benchmarks/spinning-balls/index.html
new file mode 100644
index 0000000..d01f31f
--- /dev/null
+++ b/benchmarks/spinning-balls/index.html
@@ -0,0 +1,11 @@
+<html>
+<head>
+  <style>
+    body { text-align: center; }
+  </style>
+</head>
+<body>
+  <script type="text/javascript" src="splay-tree.js"></script>
+  <script type="text/javascript" src="v.js"></script>
+</body>
+</html>
diff --git a/benchmarks/spinning-balls/splay-tree.js b/benchmarks/spinning-balls/splay-tree.js
new file mode 100644
index 0000000..a88e4cb
--- /dev/null
+++ b/benchmarks/spinning-balls/splay-tree.js
@@ -0,0 +1,326 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * Constructs a Splay tree.  A splay tree is a self-balancing binary
+ * search tree with the additional property that recently accessed
+ * elements are quick to access again. It performs basic operations
+ * such as insertion, look-up and removal in O(log(n)) amortized time.
+ *
+ * @constructor
+ */
+function SplayTree() {
+}
+
+
+/**
+ * Pointer to the root node of the tree.
+ *
+ * @type {SplayTree.Node}
+ * @private
+ */
+SplayTree.prototype.root_ = null;
+
+
+/**
+ * @return {boolean} Whether the tree is empty.
+ */
+SplayTree.prototype.isEmpty = function() {
+  return !this.root_;
+};
+
+
+/**
+ * Inserts a node into the tree with the specified key and value if
+ * the tree does not already contain a node with the specified key. If
+ * the value is inserted, it becomes the root of the tree.
+ *
+ * @param {number} key Key to insert into the tree.
+ * @param {*} value Value to insert into the tree.
+ */
+SplayTree.prototype.insert = function(key, value) {
+  if (this.isEmpty()) {
+    this.root_ = new SplayTree.Node(key, value);
+    return;
+  }
+  // Splay on the key to move the last node on the search path for
+  // the key to the root of the tree.
+  this.splay_(key);
+  if (this.root_.key == key) {
+    return;
+  }
+  var node = new SplayTree.Node(key, value);
+  if (key > this.root_.key) {
+    node.left = this.root_;
+    node.right = this.root_.right;
+    this.root_.right = null;
+  } else {
+    node.right = this.root_;
+    node.left = this.root_.left;
+    this.root_.left = null;
+  }
+  this.root_ = node;
+};
+
+
+/**
+ * Removes a node with the specified key from the tree if the tree
+ * contains a node with this key. The removed node is returned. If the
+ * key is not found, an exception is thrown.
+ *
+ * @param {number} key Key to find and remove from the tree.
+ * @return {SplayTree.Node} The removed node.
+ */
+SplayTree.prototype.remove = function(key) {
+  if (this.isEmpty()) {
+    throw Error('Key not found: ' + key);
+  }
+  this.splay_(key);
+  if (this.root_.key != key) {
+    throw Error('Key not found: ' + key);
+  }
+  var removed = this.root_;
+  if (!this.root_.left) {
+    this.root_ = this.root_.right;
+  } else {
+    var right = this.root_.right;
+    this.root_ = this.root_.left;
+    // Splay to make sure that the new root has an empty right child.
+    this.splay_(key);
+    // Insert the original right child as the right child of the new
+    // root.
+    this.root_.right = right;
+  }
+  return removed;
+};
+
+
+/**
+ * Returns the node having the specified key or null if the tree
+ * doesn't contain a node with the specified key.
+ *
+ * @param {number} key Key to find in the tree.
+ * @return {SplayTree.Node} Node having the specified key.
+ */
+SplayTree.prototype.find = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  this.splay_(key);
+  return this.root_.key == key ? this.root_ : null;
+};
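+
+// A minimal usage sketch (not part of the benchmark itself). Keys are
+// numbers, values are arbitrary, and every successful find() splays the
+// hit to the root:
+//
+//   var tree = new SplayTree();
+//   tree.insert(5, "five");
+//   tree.insert(3, "three");
+//   tree.find(3).value;    // "three"; node 3 is now the root.
+//   tree.remove(5).value;  // "five"; remove() throws on a missing key.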
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value.
+ */
+SplayTree.prototype.findMax = function(opt_startNode) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  var current = opt_startNode || this.root_;
+  while (current.right) {
+    current = current.right;
+  }
+  return current;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value that
+ *     is less than the specified key value.
+ */
+SplayTree.prototype.findGreatestLessThan = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  this.splay_(key);
+  // Now the result is either the root node or the greatest node in
+  // the left subtree.
+  if (this.root_.key < key) {
+    return this.root_;
+  } else if (this.root_.left) {
+    return this.findMax(this.root_.left);
+  } else {
+    return null;
+  }
+};
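+
+// For illustration: with keys 3 and 7 in the tree,
+// findGreatestLessThan(5) returns the node with key 3, while
+// findGreatestLessThan(3) returns null because the comparison above is
+// strict (root_.key < key).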
+
+
+/**
+ * @return {Array<*>} An array containing all the keys of tree's nodes.
+ */
+SplayTree.prototype.exportKeys = function() {
+  var result = [];
+  if (!this.isEmpty()) {
+    this.root_.traverse_(function(node) { result.push(node.key); });
+  }
+  return result;
+};
+
+
+/**
+ * Perform the splay operation for the given key. Moves the node with
+ * the given key to the top of the tree.  If no node has the given
+ * key, the last node on the search path is moved to the top of the
+ * tree. This is the simplified top-down splaying algorithm from:
+ * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
+ *
+ * @param {number} key Key to splay the tree on.
+ * @private
+ */
+SplayTree.prototype.splay_ = function(key) {
+  if (this.isEmpty()) {
+    return;
+  }
+  // Create a dummy node.  The use of the dummy node is a bit
+  // counter-intuitive: The right child of the dummy node will hold
+  // the L tree of the algorithm.  The left child of the dummy node
+  // will hold the R tree of the algorithm.  Using a dummy node, left
+  // and right will always be nodes and we avoid special cases.
+  var dummy, left, right;
+  dummy = left = right = new SplayTree.Node(null, null);
+  var current = this.root_;
+  while (true) {
+    if (key < current.key) {
+      if (!current.left) {
+        break;
+      }
+      if (key < current.left.key) {
+        // Rotate right.
+        var tmp = current.left;
+        current.left = tmp.right;
+        tmp.right = current;
+        current = tmp;
+        if (!current.left) {
+          break;
+        }
+      }
+      // Link right.
+      right.left = current;
+      right = current;
+      current = current.left;
+    } else if (key > current.key) {
+      if (!current.right) {
+        break;
+      }
+      if (key > current.right.key) {
+        // Rotate left.
+        var tmp = current.right;
+        current.right = tmp.left;
+        tmp.left = current;
+        current = tmp;
+        if (!current.right) {
+          break;
+        }
+      }
+      // Link left.
+      left.right = current;
+      left = current;
+      current = current.right;
+    } else {
+      break;
+    }
+  }
+  // Assemble.
+  left.right = current.left;
+  right.left = current.right;
+  current.left = dummy.right;
+  current.right = dummy.left;
+  this.root_ = current;
+};
+
+
+/**
+ * Constructs a Splay tree node.
+ *
+ * @param {number} key Key.
+ * @param {*} value Value.
+ */
+SplayTree.Node = function(key, value) {
+  this.key = key;
+  this.value = value;
+};
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.left = null;
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.right = null;
+
+
+/**
+ * Performs an ordered traversal of the subtree starting at
+ * this SplayTree.Node.
+ *
+ * @param {function(SplayTree.Node)} f Visitor function.
+ * @private
+ */
+SplayTree.Node.prototype.traverse_ = function(f) {
+  var current = this;
+  while (current) {
+    var left = current.left;
+    if (left) left.traverse_(f);
+    f(current);
+    current = current.right;
+  }
+};
+
+SplayTree.prototype.traverseBreadthFirst = function (f) {
+  if (f(this.root_.value)) return;
+
+  var stack = [this.root_];
+  var length = 1;
+
+  while (length > 0) {
+    var new_stack = new Array(stack.length * 2);
+    var new_length = 0;
+    for (var i = 0; i < length; i++) {
+      var n = stack[i];
+      var l = n.left;
+      var r = n.right;
+      if (l) {
+        if (f(l.value)) return;
+        new_stack[new_length++] = l;
+      }
+      if (r) {
+        if (f(r.value)) return;
+        new_stack[new_length++] = r;
+      }
+    }
+    stack = new_stack;
+    length = new_length;
+  }
+};
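+
+// A usage sketch (hypothetical predicate, and note that the traversal
+// assumes a non-empty tree): values are visited level by level starting
+// at the root, and returning a truthy value stops the walk early.
+//
+//   var match = null;
+//   tree.traverseBreadthFirst(function (value) {
+//     if (value.lifeForce <= 0) { match = value; return true; }
+//     return false;
+//   });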
diff --git a/benchmarks/spinning-balls/v.js b/benchmarks/spinning-balls/v.js
new file mode 100644
index 0000000..5ae1194
--- /dev/null
+++ b/benchmarks/spinning-balls/v.js
@@ -0,0 +1,498 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * This function provides requestAnimationFrame in a cross browser way.
+ * http://paulirish.com/2011/requestanimationframe-for-smart-animating/
+ */
+if ( !window.requestAnimationFrame ) {
+  window.requestAnimationFrame = ( function() {
+    return window.webkitRequestAnimationFrame ||
+        window.mozRequestAnimationFrame ||
+        window.oRequestAnimationFrame ||
+        window.msRequestAnimationFrame ||
+        function(callback, element) {
+          window.setTimeout( callback, 1000 / 60 );
+        };
+  } )();
+}
+
+var kNPoints = 8000;
+var kNModifications = 20;
+var kNVisiblePoints = 200;
+var kDecaySpeed = 20;
+
+var kPointRadius = 4;
+var kInitialLifeForce = 100;
+
+var livePoints = void 0;
+var dyingPoints = void 0;
+var scene = void 0;
+var renderingStartTime = void 0;
+var pausePlot = void 0;
+var splayTree = void 0;
+var numberOfFrames = 0;
+var sumOfSquaredPauses = 0;
+var benchmarkStartTime = void 0;
+var benchmarkTimeLimit = void 0;
+var autoScale = void 0;
+var pauseDistribution = [];
+
+
+function Point(x, y, z, payload) {
+  this.x = x;
+  this.y = y;
+  this.z = z;
+
+  this.next = null;
+  this.prev = null;
+  this.payload = payload;
+  this.lifeForce = kInitialLifeForce;
+}
+
+
+Point.prototype.color = function () {
+  return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")";
+};
+
+
+Point.prototype.decay = function () {
+  this.lifeForce -= kDecaySpeed;
+  return this.lifeForce <= 0;
+};
+
+
+function PointsList() {
+  this.head = null;
+  this.count = 0;
+}
+
+
+PointsList.prototype.add = function (point) {
+  if (this.head !== null) this.head.prev = point;
+  point.next = this.head;
+  this.head = point;
+  this.count++;
+}
+
+
+PointsList.prototype.remove = function (point) {
+  if (point.next !== null) {
+    point.next.prev = point.prev;
+  }
+  if (point.prev !== null) {
+    point.prev.next = point.next;
+  } else {
+    this.head = point.next;
+  }
+  this.count--;
+}
+
+
+function GeneratePayloadTree(depth, tag) {
+  if (depth == 0) {
+    return {
+      array  : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
+      string : 'String for key ' + tag + ' in leaf node'
+    };
+  } else {
+    return {
+      left:  GeneratePayloadTree(depth - 1, tag),
+      right: GeneratePayloadTree(depth - 1, tag)
+    };
+  }
+}
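+
+// For the depth used below (5), each payload is a complete binary tree
+// with 2^5 == 32 leaves, so every point drags along on the order of a
+// hundred small heap objects -- real work for the garbage collector.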
+
+
+// To make the benchmark results predictable, we replace Math.random
+// with a 100% deterministic alternative.
+Math.random = (function() {
+  var seed = 49734321;
+  return function() {
+    // Robert Jenkins' 32 bit integer hash function.
+    seed = ((seed + 0x7ed55d16) + (seed << 12))  & 0xffffffff;
+    seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
+    seed = ((seed + 0x165667b1) + (seed << 5))   & 0xffffffff;
+    seed = ((seed + 0xd3a2646c) ^ (seed << 9))   & 0xffffffff;
+    seed = ((seed + 0xfd7046c5) + (seed << 3))   & 0xffffffff;
+    seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
+    return (seed & 0xfffffff) / 0x10000000;
+  };
+})();
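+
+// Since the seed evolves deterministically from a fixed start value,
+// every run sees the same sequence, and results stay in [0, 1) because
+// (seed & 0xfffffff) is always less than 0x10000000. For instance:
+//
+//   var a = Math.random();  // identical on every page load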
+
+
+function GenerateKey() {
+  // The benchmark framework guarantees that Math.random is
+  // deterministic; see base.js.
+  return Math.random();
+}
+
+function CreateNewPoint() {
+  // Insert new node with a unique key.
+  var key;
+  do { key = GenerateKey(); } while (splayTree.find(key) != null);
+
+  var point = new Point(Math.random() * 40 - 20,
+                        Math.random() * 40 - 20,
+                        Math.random() * 40 - 20,
+                        GeneratePayloadTree(5, "" + key));
+
+  livePoints.add(point);
+
+  splayTree.insert(key, point);
+  return key;
+}
+
+function ModifyPointsSet() {
+  if (livePoints.count < kNPoints) {
+    for (var i = 0; i < kNModifications; i++) {
+      CreateNewPoint();
+    }
+  } else if (kNModifications === 20) {
+    kNModifications = 80;
+    kDecaySpeed = 30;
+  }
+
+  for (var i = 0; i < kNModifications; i++) {
+    var key = CreateNewPoint();
+    var greatest = splayTree.findGreatestLessThan(key);
+    if (greatest == null) {
+      var point = splayTree.remove(key).value;
+    } else {
+      var point = splayTree.remove(greatest.key).value;
+    }
+    livePoints.remove(point);
+    point.payload = null;
+    dyingPoints.add(point);
+  }
+}
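+
+// Net effect once the tree is full: each frame creates kNModifications
+// fresh points and retires an equal number into dyingPoints, so the
+// live population stays near kNPoints while the heap churns steadily.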
+
+
+function PausePlot(width, height, size, scale) {
+  var canvas = document.createElement("canvas");
+  canvas.width = this.width = width;
+  canvas.height = this.height = height;
+  document.body.appendChild(canvas);
+
+  this.ctx = canvas.getContext('2d');
+
+  if (typeof scale !== "number") {
+    this.autoScale = true;
+    this.maxPause = 0;
+  } else {
+    this.autoScale = false;
+    this.maxPause = scale;
+  }
+
+  this.size = size;
+
+  // Initialize cyclic buffer for pauses.
+  this.pauses = new Array(this.size);
+  this.start = this.size;
+  this.idx = 0;
+}
+
+
+PausePlot.prototype.addPause = function (p) {
+  if (this.idx === this.size) {
+    this.idx = 0;
+  }
+
+  if (this.idx === this.start) {
+    this.start++;
+  }
+
+  if (this.start === this.size) {
+    this.start = 0;
+  }
+
+  this.pauses[this.idx++] = p;
+};
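+
+// A worked example of the cyclic buffer with size == 3: after
+// addPause(10), addPause(20), addPause(30), addPause(40) the buffer
+// holds [40, 20, 30] with start == 1, and iteratePauses() below visits
+// the pauses oldest-first: 20, 30, 40.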
+
+
+PausePlot.prototype.iteratePauses = function (f) {
+  if (this.start < this.idx) {
+    for (var i = this.start; i < this.idx; i++) {
+      f.call(this, i - this.start, this.pauses[i]);
+    }
+  } else {
+    for (var i = this.start; i < this.size; i++) {
+      f.call(this, i - this.start, this.pauses[i]);
+    }
+
+    var offs = this.size - this.start;
+    for (var i = 0; i < this.idx; i++) {
+      f.call(this, i + offs, this.pauses[i]);
+    }
+  }
+};
+
+
+PausePlot.prototype.draw = function () {
+  var first = null;
+
+  if (this.autoScale) {
+    this.iteratePauses(function (i, v) {
+      if (first === null) {
+        first = v;
+      }
+      this.maxPause = Math.max(v, this.maxPause);
+    });
+  }
+
+  var dx = this.width / this.size;
+  var dy = this.height / this.maxPause;
+
+  this.ctx.save();
+  this.ctx.clearRect(0, 0, this.width, this.height);
+  this.ctx.beginPath();
+  this.ctx.moveTo(1, dy * this.pauses[this.start]);
+  var p = first;
+  this.iteratePauses(function (i, v) {
+    var delta = v - p;
+    var x = 1 + dx * i;
+    var y = dy * v;
+    this.ctx.lineTo(x, y);
+    if (delta > 2 * (p / 3)) {
+      this.ctx.font = "bold 12px sans-serif";
+      this.ctx.textBaseline = "bottom";
+      this.ctx.fillText(v + "ms", x + 2, y);
+    }
+    p = v;
+  });
+  this.ctx.strokeStyle = "black";
+  this.ctx.stroke();
+  this.ctx.restore();
+}
+
+
+function Scene(width, height) {
+  var canvas = document.createElement("canvas");
+  canvas.width = width;
+  canvas.height = height;
+  document.body.appendChild(canvas);
+
+  this.ctx = canvas.getContext('2d');
+  this.width = canvas.width;
+  this.height = canvas.height;
+
+  // Projection configuration.
+  this.x0 = canvas.width / 2;
+  this.y0 = canvas.height / 2;
+  this.z0 = 100;
+  this.f  = 1000;  // Focal length.
+
+  // Camera is rotating around y-axis.
+  this.angle = 0;
+}
+
+
+Scene.prototype.drawPoint = function (x, y, z, color) {
+  // Rotate the camera around y-axis.
+  var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle);
+  var ry = y;
+  var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle);
+
+  // Perform perspective projection.
+  var px = (this.f * rx) / (rz - this.z0) + this.x0;
+  var py = (this.f * ry) / (rz - this.z0) + this.y0;
+
+  this.ctx.save();
+  this.ctx.fillStyle = color;
+  this.ctx.beginPath();
+  this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true);
+  this.ctx.fill();
+  this.ctx.restore();
+};
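+
+// Sanity check of the projection above: with angle == 0, a point at the
+// world origin has rx == rz == 0, so px == x0 and py == y0 -- it lands
+// exactly at the center of the canvas.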
+
+
+Scene.prototype.drawDyingPoints = function () {
+  var point_next = null;
+  for (var point = dyingPoints.head; point !== null; point = point_next) {
+    // Rotate the scene around y-axis.
+    scene.drawPoint(point.x, point.y, point.z, point.color());
+
+    point_next = point.next;
+
+    // Decay the current point and remove it from the list
+    // if its life force ran out.
+    if (point.decay()) {
+      dyingPoints.remove(point);
+    }
+  }
+};
+
+
+Scene.prototype.draw = function () {
+  this.ctx.save();
+  this.ctx.clearRect(0, 0, this.width, this.height);
+  this.drawDyingPoints();
+  this.ctx.restore();
+
+  this.angle += Math.PI / 90.0;
+};
+
+
+function updateStats(pause) {
+  numberOfFrames++;
+  if (pause > 20) {
+    sumOfSquaredPauses += (pause - 20) * (pause - 20);
+  }
+  // "|= 0" turns an undefined bucket into 0 before the increment below.
+  pauseDistribution[Math.floor(pause / 10)] |= 0;
+  pauseDistribution[Math.floor(pause / 10)]++;
+}
+
+
+function renderStats() {
+  var msg = document.createElement("p");
+  msg.innerHTML = "Score " +
+    Math.round(numberOfFrames * 1000 / sumOfSquaredPauses);
+  var table = document.createElement("table");
+  table.align = "center";
+  for (var i = 0; i < pauseDistribution.length; i++) {
+    if (pauseDistribution[i] > 0) {
+      var row = document.createElement("tr");
+      var time = document.createElement("td");
+      var count = document.createElement("td");
+      time.innerHTML = i*10 + "-" + (i+1)*10 + "ms";
+      count.innerHTML = " => " + pauseDistribution[i];
+      row.appendChild(time);
+      row.appendChild(count);
+      table.appendChild(row);
+    }
+  }
+  div.appendChild(msg);
+  div.appendChild(table);
+}
+
+
+function render() {
+  if (typeof renderingStartTime === 'undefined') {
+    renderingStartTime = Date.now();
+    benchmarkStartTime = renderingStartTime;
+  }
+
+  ModifyPointsSet();
+
+  scene.draw();
+
+  var renderingEndTime = Date.now();
+  var pause = renderingEndTime - renderingStartTime;
+  pausePlot.addPause(pause);
+  renderingStartTime = renderingEndTime;
+
+  pausePlot.draw();
+
+  updateStats(pause);
+
+  div.innerHTML =
+      livePoints.count + "/" + dyingPoints.count + " " +
+      pause + "(max = " + pausePlot.maxPause + ") ms " +
+      numberOfFrames + " frames";
+
+  if (renderingEndTime < benchmarkStartTime + benchmarkTimeLimit) {
+    // Schedule next frame.
+    requestAnimationFrame(render);
+  } else {
+    renderStats();
+  }
+}
+
+
+function Form() {
+  function create(tag) { return document.createElement(tag); }
+  function text(value) { return document.createTextNode(value); }
+
+  this.form = create("form");
+  this.form.setAttribute("action", "javascript:start()");
+
+  var table = create("table");
+  table.setAttribute("style", "margin-left: auto; margin-right: auto;");
+
+  function col(a) {
+    var td = create("td");
+    td.appendChild(a);
+    return td;
+  }
+
+  function row(a, b) {
+    var tr = create("tr");
+    tr.appendChild(col(a));
+    tr.appendChild(col(b));
+    return tr;
+  }
+
+  this.timelimit = create("input");
+  this.timelimit.setAttribute("value", "60");
+
+  table.appendChild(row(text("Time limit in seconds"), this.timelimit));
+
+  this.autoscale = create("input");
+  this.autoscale.setAttribute("type", "checkbox");
+  this.autoscale.setAttribute("checked", "true");
+  table.appendChild(row(text("Autoscale pauses plot"), this.autoscale));
+
+  var button = create("input");
+  button.setAttribute("type", "submit");
+  button.setAttribute("value", "Start");
+  this.form.appendChild(table);
+  this.form.appendChild(button);
+
+  document.body.appendChild(this.form);
+}
+
+
+Form.prototype.remove = function () {
+  document.body.removeChild(this.form);
+};
+
+
+function init() {
+  livePoints = new PointsList;
+  dyingPoints = new PointsList;
+
+  splayTree = new SplayTree();
+
+  scene = new Scene(640, 480);
+
+  div = document.createElement("div");
+  document.body.appendChild(div);
+
+  pausePlot = new PausePlot(480, autoScale ? 240 : 500, 160, autoScale ? void 0 : 500);
+}
+
+function start() {
+  benchmarkTimeLimit = form.timelimit.value * 1000;
+  autoScale = form.autoscale.checked;
+  form.remove();
+  init();
+  render();
+}
+
+var form = new Form();
diff --git a/build/common.gypi b/build/common.gypi
index 4e896e0..c95096a 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -50,16 +50,24 @@
     # probing when running on the target.
     'v8_can_use_vfp_instructions%': 'false',
 
+    # Similar to vfp but on MIPS.
+    'v8_can_use_fpu_instructions%': 'true',
+
     # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
     # EABI calling convention where double arguments are passed in VFP
     # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
     # well when compiling for the ARM target.
     'v8_use_arm_eabi_hardfloat%': 'false',
 
+    # Similar to the ARM hard float ABI but on MIPS.
+    'v8_use_mips_abi_hardfloat%': 'true',
+
     'v8_enable_debugger_support%': 1,
 
     'v8_enable_disassembler%': 0,
 
+    'v8_object_print%': 0,
+
     'v8_enable_gdbjit%': 0,
 
     # Enable profiling support. Only required on Windows.
@@ -72,6 +80,7 @@
     'v8_use_snapshot%': 'true',
     'host_os%': '<(OS)',
     'v8_use_liveobjectlist%': 'false',
+    'werror%': '-Werror',
 
     # For a shared library build, results in "libv8-<(soname_version).so".
     'soname_version%': '',
@@ -84,6 +93,9 @@
       ['v8_enable_disassembler==1', {
         'defines': ['ENABLE_DISASSEMBLER',],
       }],
+      ['v8_object_print==1', {
+        'defines': ['OBJECT_PRINT',],
+      }],
       ['v8_enable_gdbjit==1', {
         'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
@@ -129,7 +141,7 @@
               }],
               # The ARM assembler assumes the host is 32 bits,
               # so force building 32-bit host tools.
-              ['host_arch=="x64"', {
+              ['host_arch=="x64" or OS=="android"', {
                 'target_conditions': [
                   ['_toolset=="host"', {
                     'cflags': ['-m32'],
@@ -148,6 +160,33 @@
             'defines': [
               'V8_TARGET_ARCH_MIPS',
             ],
+            'conditions': [
+              [ 'v8_can_use_fpu_instructions=="true"', {
+                'defines': [
+                  'CAN_USE_FPU_INSTRUCTIONS',
+                ],
+              }],
+              [ 'v8_use_mips_abi_hardfloat=="true"', {
+                'defines': [
+                  '__mips_hard_float=1',
+                  'CAN_USE_FPU_INSTRUCTIONS',
+                ],
+              }, {
+                'defines': [
+                  '__mips_soft_float=1'
+                ],
+              }],
+              # The MIPS assembler assumes the host is 32 bits,
+              # so force building 32-bit host tools.
+              ['host_arch=="x64"', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'cflags': ['-m32'],
+                    'ldflags': ['-m32'],
+                  }],
+                ],
+              }],
+            ],
           }],
           ['v8_target_arch=="x64"', {
             'defines': [
@@ -184,6 +223,9 @@
           }],
         ],
       }],
+      ['OS=="solaris"', {
+        'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
+      }],
     ],
     'configurations': {
       'Debug': {
@@ -218,8 +260,8 @@
             'cflags': [ '-I/usr/local/include' ],
           }],
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-            'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
-                        '-Wnon-virtual-dtor' ],
+            'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
+                        '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
           }],
         ],
       },
@@ -261,7 +303,7 @@
           }],
           ['OS=="win"', {
             'msvs_configuration_attributes': {
-              'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
+              'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
               'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
               'CharacterSet': '1',
             },
diff --git a/build/gyp_v8 b/build/gyp_v8
index dfdbe3f..4293e76 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -171,3 +171,8 @@
     gyp_args.append('-I' + v8_root + '/build/armu.gypi')
     gyp_args.append('-S-armu')
     run_gyp(gyp_args)
+
+    gyp_args = list(args)
+    gyp_args.append('-I' + v8_root + '/build/mipsu.gypi')
+    gyp_args.append('-S-mipsu')
+    run_gyp(gyp_args)
diff --git a/build/mipsu.gypi b/build/mipsu.gypi
new file mode 100644
index 0000000..8dfea74
--- /dev/null
+++ b/build/mipsu.gypi
@@ -0,0 +1,33 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    'target_arch': 'ia32',
+    'v8_target_arch': 'mips',
+  },
+}
diff --git a/build/standalone.gypi b/build/standalone.gypi
index cb5e133..4297f5c 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -35,27 +35,33 @@
     'msvs_multi_core_compile%': '1',
     'variables': {
       'variables': {
-        'conditions': [
-          [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-            # This handles the Linux platforms we generally deal with. Anything
-            # else gets passed through, which probably won't work very well; such
-            # hosts should pass an explicit target_arch to gyp.
-            'host_arch%':
-              '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
-          }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
-            'host_arch%': 'ia32',
-          }],
-        ],
+        'variables': {
+          'conditions': [
+            [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+              # This handles the Linux platforms we generally deal with. Anything
+              # else gets passed through, which probably won't work very well; such
+              # hosts should pass an explicit target_arch to gyp.
+              'host_arch%':
+                '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
+            }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
+              'host_arch%': 'ia32',
+            }],
+          ],
+        },
+        'host_arch%': '<(host_arch)',
+        'target_arch%': '<(host_arch)',
       },
       'host_arch%': '<(host_arch)',
-      'target_arch%': '<(host_arch)',
+      'target_arch%': '<(target_arch)',
       'v8_target_arch%': '<(target_arch)',
     },
     'host_arch%': '<(host_arch)',
     'target_arch%': '<(target_arch)',
     'v8_target_arch%': '<(v8_target_arch)',
+    'werror%': '-Werror',
     'conditions': [
       ['(v8_target_arch=="arm" and host_arch!="arm") or \
+        (v8_target_arch=="mips" and host_arch!="mips") or \
         (v8_target_arch=="x64" and host_arch!="x64")', {
         'want_separate_host_toolset': 1,
       }, {
@@ -74,7 +80,7 @@
   'conditions': [
     [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
       'target_defaults': {
-        'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
+        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                     '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
                     '-fno-exceptions', '-pedantic' ],
         'ldflags': [ '-pthread', ],
diff --git a/include/v8-debug.h b/include/v8-debug.h
old mode 100644
new mode 100755
index 504cbfe..9e85dc4
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -340,6 +340,11 @@
                           bool wait_for_connection = false);
 
   /**
+   * Disable the V8 builtin debug agent. The TCP/IP connection will be
+   * closed.
+   */
+  static void DisableAgent();
+
+  /**
    * Makes V8 process all pending debug messages.
    *
    * From V8 point of view all debug messages come asynchronously (e.g. from
diff --git a/include/v8.h b/include/v8.h
index 4b7f6e7..7cad7b6 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1171,7 +1171,8 @@
    * Get the ExternalAsciiStringResource for an external ASCII string.
    * Returns NULL if IsExternalAscii() doesn't return true.
    */
-  V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
+  V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
+      const;
 
   static inline String* Cast(v8::Value* obj);
 
@@ -1735,9 +1736,16 @@
    * kLineOffsetNotFound if no information available.
    */
   V8EXPORT int GetScriptLineNumber() const;
+  /**
+   * Returns zero based column number of function body and
+   * kLineOffsetNotFound if no information available.
+   */
+  V8EXPORT int GetScriptColumnNumber() const;
+  V8EXPORT Handle<Value> GetScriptId() const;
   V8EXPORT ScriptOrigin GetScriptOrigin() const;
   static inline Function* Cast(Value* obj);
   V8EXPORT static const int kLineOffsetNotFound;
+
  private:
   V8EXPORT Function();
   V8EXPORT static void CheckCast(Value* obj);
@@ -2451,24 +2459,42 @@
 
 // --- Extensions ---
 
+class V8EXPORT ExternalAsciiStringResourceImpl
+    : public String::ExternalAsciiStringResource {
+ public:
+  ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
+  ExternalAsciiStringResourceImpl(const char* data, size_t length)
+      : data_(data), length_(length) {}
+  const char* data() const { return data_; }
+  size_t length() const { return length_; }
+
+ private:
+  const char* data_;
+  size_t length_;
+};
 
 /**
  * Ignore
  */
 class V8EXPORT Extension {  // NOLINT
  public:
+  // Note that the strings passed into this constructor must live as long
+  // as the Extension itself.
   Extension(const char* name,
             const char* source = 0,
             int dep_count = 0,
-            const char** deps = 0);
+            const char** deps = 0,
+            int source_length = -1);
   virtual ~Extension() { }
   virtual v8::Handle<v8::FunctionTemplate>
       GetNativeFunction(v8::Handle<v8::String> name) {
     return v8::Handle<v8::FunctionTemplate>();
   }
 
-  const char* name() { return name_; }
-  const char* source() { return source_; }
+  const char* name() const { return name_; }
+  size_t source_length() const { return source_length_; }
+  const String::ExternalAsciiStringResource* source() const {
+    return &source_;
+  }
   int dependency_count() { return dep_count_; }
   const char** dependencies() { return deps_; }
   void set_auto_enable(bool value) { auto_enable_ = value; }
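+  // A minimal registration sketch (the name and source below are made
+  // up for illustration; both char arrays must outlive the Extension,
+  // per the note on the constructor above):
+  //
+  //   static const char kSource[] = "native function hello();";
+  //   v8::RegisterExtension(new v8::Extension(
+  //       "hello/v1", kSource, 0, NULL, sizeof(kSource) - 1));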
@@ -2476,7 +2502,8 @@
 
  private:
   const char* name_;
-  const char* source_;
+  size_t source_length_;  // expected to be initialized before source_
+  ExternalAsciiStringResourceImpl source_;
   int dep_count_;
   const char** deps_;
   bool auto_enable_;
@@ -3498,9 +3525,9 @@
  *
  * v8::Locker is a scoped lock object. While it's
  * active (i.e. between its construction and destruction) the current thread is
- * allowed to use the locked isolate. V8 guarantees that an isolate can be locked
- * by at most one thread at any time. In other words, the scope of a v8::Locker is
- * a critical section.
+ * allowed to use the locked isolate. V8 guarantees that an isolate can be
+ * locked by at most one thread at any time. In other words, the scope of a
+ * v8::Locker is a critical section.
  *
  * Sample usage:
 * \code
@@ -3602,8 +3629,8 @@
   static void StopPreemption();
 
   /**
-   * Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
-   * is locked by the current thread.
+   * Returns whether or not the locker for a given isolate, or default isolate
+   * if NULL is given, is locked by the current thread.
    */
   static bool IsLocked(Isolate* isolate = NULL);
 
@@ -3677,8 +3704,8 @@
 
 namespace internal {
 
-static const int kApiPointerSize = sizeof(void*);  // NOLINT
-static const int kApiIntSize = sizeof(int);  // NOLINT
+const int kApiPointerSize = sizeof(void*);  // NOLINT
+const int kApiIntSize = sizeof(int);  // NOLINT
 
 // Tag information for HeapObject.
 const int kHeapObjectTag = 1;
@@ -3769,7 +3796,7 @@
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;
 
-  static const int kJSObjectType = 0xa3;
+  static const int kJSObjectType = 0xa6;
   static const int kFirstNonstringType = 0x80;
   static const int kForeignType = 0x85;
 
diff --git a/preparser/preparser-process.cc b/preparser/preparser-process.cc
index e67851c..b0aeb81 100644
--- a/preparser/preparser-process.cc
+++ b/preparser/preparser-process.cc
@@ -267,34 +267,22 @@
 
 
 ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
+  // Parse ["throws" [<exn-type> [<start> [<end>]]]].
   ExceptionExpectation expects;
-
-  // Parse exception expectations from (the remainder of) the command line.
   int arg_index = 0;
-  // Skip any flags.
-  while (argc > arg_index && IsFlag(argv[arg_index])) arg_index++;
+  while (argc > arg_index && strncmp("throws", argv[arg_index], 7)) {
+    arg_index++;
+  }
   if (argc > arg_index) {
-    if (strncmp("throws", argv[arg_index], 7)) {
-      // First argument after filename, if present, must be the verbatim
-      // "throws", marking that the preparsing should fail with an exception.
-      fail(NULL, "ERROR: Extra arguments not prefixed by \"throws\".\n");
-    }
     expects.throws = true;
-    do {
-      arg_index++;
-    } while (argc > arg_index && IsFlag(argv[arg_index]));
-    if (argc > arg_index) {
-      // Next argument is the exception type identifier.
+    arg_index++;
+    if (argc > arg_index && !IsFlag(argv[arg_index])) {
       expects.type = argv[arg_index];
-      do {
-        arg_index++;
-      } while (argc > arg_index && IsFlag(argv[arg_index]));
-      if (argc > arg_index) {
+      arg_index++;
+      if (argc > arg_index && !IsFlag(argv[arg_index])) {
         expects.beg_pos = atoi(argv[arg_index]);  // NOLINT
-        do {
-          arg_index++;
-        } while (argc > arg_index && IsFlag(argv[arg_index]));
-        if (argc > arg_index) {
+        arg_index++;
+        if (argc > arg_index && !IsFlag(argv[arg_index])) {
           expects.end_pos = atoi(argv[arg_index]);  // NOLINT
         }
       }
@@ -308,7 +296,8 @@
   // Parse command line.
   // Format:  preparser (<scriptfile> | -e "<source>")
   //                    ["throws" [<exn-type> [<start> [<end>]]]]
-  // Any flags (except an initial -s) are ignored.
+  // Any flags (except an initial -e) are ignored.
+  // Flags must not separate "throws" and its arguments.
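+  // For example (a hypothetical invocation):
+  //   preparser test.js throws SyntaxError 12 17
+  // expects preparsing of test.js to fail with a SyntaxError covering
+  // source positions 12 through 17.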
 
   // Check for mandatory filename argument.
   int arg_index = 1;
diff --git a/src/SConscript b/src/SConscript
old mode 100644
new mode 100755
index 52607f1..42de36b
--- a/src/SConscript
+++ b/src/SConscript
@@ -84,6 +84,7 @@
     hydrogen.cc
     hydrogen-instructions.cc
     ic.cc
+    incremental-marking.cc
     inspector.cc
     interpreter-irregexp.cc
     isolate.cc
@@ -133,6 +134,7 @@
     v8utils.cc
     variables.cc
     version.cc
+    store-buffer.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
@@ -170,6 +172,9 @@
     mips/frames-mips.cc
     mips/full-codegen-mips.cc
     mips/ic-mips.cc
+    mips/lithium-codegen-mips.cc
+    mips/lithium-gap-resolver-mips.cc
+    mips/lithium-mips.cc
     mips/macro-assembler-mips.cc
     mips/regexp-macro-assembler-mips.cc
     mips/stub-cache-mips.cc
@@ -319,7 +324,7 @@
 
 EXPERIMENTAL_LIBRARY_FILES = '''
 proxy.js
-weakmap.js
+collection.js
 '''.split()
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index 951209d..e60f568 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -527,7 +527,9 @@
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
     Handle<JSFunction> handle(function);
-    if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
+    if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
+      return Failure::Exception();
+    }
     return Smi::FromInt(handle->shared()->length());
   } else {
     return Smi::FromInt(function->shared()->length());
@@ -619,8 +621,9 @@
 
       if (!frame->is_optimized()) {
         // If there is an arguments variable in the stack, we return that.
-        Handle<SerializedScopeInfo> info(function->shared()->scope_info());
-        int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
+        Handle<ScopeInfo> scope_info(function->shared()->scope_info());
+        int index = scope_info->StackSlotIndex(
+            isolate->heap()->arguments_symbol());
         if (index >= 0) {
           Handle<Object> arguments(frame->GetExpression(index), isolate);
           if (!arguments->IsArgumentsMarker()) return *arguments;
@@ -672,7 +675,7 @@
     Isolate* isolate,
     JSFunction* caller) {
   DisableAssertNoAllocation enable_allocation;
-  if (caller->shared()->strict_mode()) {
+  if (!caller->shared()->is_classic_mode()) {
     return isolate->Throw(
         *isolate->factory()->NewTypeError("strict_caller",
                                           HandleVector<Object>(NULL, 0)));
@@ -759,7 +762,12 @@
     caller = potential_caller;
     potential_caller = it.next();
   }
-
+  // If caller is bound, return null. This is compatible with JSC, and
+  // allows us to make bound functions use the strict function map
+  // and its associated throwing caller and arguments.
+  if (caller->shared()->bound()) {
+    return isolate->heap()->null_value();
+  }
   return CheckNonStrictCallerOrThrow(isolate, caller);
 }
 
diff --git a/src/allocation.h b/src/allocation.h
index 75aba35..00c5664 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -81,7 +81,7 @@
 
 
 template <typename T>
-static T* NewArray(int size) {
+T* NewArray(int size) {
   T* result = new T[size];
   if (result == NULL) Malloced::FatalProcessOutOfMemory();
   return result;
@@ -89,7 +89,7 @@
 
 
 template <typename T>
-static void DeleteArray(T* array) {
+void DeleteArray(T* array) {
   delete[] array;
 }
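
// Dropping `static` here matters because these templates live in a header:
// `static` gives each including translation unit its own internal-linkage
// copy, while the plain form lets instantiations be merged across
// translation units. A minimal sketch of the preferred shape (the name is
// hypothetical):

template <typename T>
T* NewArraySketch(int size) {  // external linkage; instantiations merged
  return new T[size];
}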
 
diff --git a/src/api.cc b/src/api.cc
index 39c0d02..f60b2cd 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -185,7 +185,10 @@
   int end_marker;
   heap_stats.end_marker = &end_marker;
   i::Isolate* isolate = i::Isolate::Current();
-  isolate->heap()->RecordStats(&heap_stats, take_snapshot);
+  // BUG(1718):
+  // Don't use take_snapshot, since we don't support HeapIterator here

+  // without doing a special GC.
+  isolate->heap()->RecordStats(&heap_stats, false);
   i::V8::SetFatalError();
   FatalErrorCallback callback = GetFatalErrorHandler();
   {
@@ -483,7 +486,7 @@
 
 
 RegisteredExtension::RegisteredExtension(Extension* extension)
-    : extension_(extension), state_(UNVISITED) { }
+    : extension_(extension) { }
 
 
 void RegisteredExtension::Register(RegisteredExtension* that) {
@@ -501,9 +504,12 @@
 Extension::Extension(const char* name,
                      const char* source,
                      int dep_count,
-                     const char** deps)
+                     const char** deps,
+                     int source_length)
     : name_(name),
-      source_(source),
+      source_length_(source_length >= 0 ?
+                  source_length : (source ? strlen(source) : 0)),
+      source_(source, source_length_),
       dep_count_(dep_count),
       deps_(deps),
       auto_enable_(false) { }
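
// A minimal sketch of the length-defaulting idiom used in the constructor
// above: a negative source_length means "compute it from the string". Note
// the reliance on declaration order, since members are initialized in the
// order they are declared, not the order they appear in the initializer
// list. Names here are hypothetical stand-ins.

#include <cstring>

class SourceRef {
 public:
  explicit SourceRef(const char* data, int length = -1)
      : length_(length >= 0 ? static_cast<size_t>(length)
                            : (data ? std::strlen(data) : 0)),
        data_(data) { }
  size_t length() const { return length_; }
 private:
  size_t length_;       // must be declared before data_
  const char* data_;
};
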
@@ -1407,7 +1413,7 @@
 ScriptData* ScriptData::PreCompile(const char* input, int length) {
   i::Utf8ToUC16CharacterStream stream(
       reinterpret_cast<const unsigned char*>(input), length);
-  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
 }
 
 
@@ -1416,10 +1422,10 @@
   if (str->IsExternalTwoByteString()) {
     i::ExternalTwoByteStringUC16CharacterStream stream(
       i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
   } else {
     i::GenericStringUC16CharacterStream stream(str, 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
   }
 }
 
@@ -1788,7 +1794,7 @@
 static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                                i::Handle<i::Object> recv,
                                                int argc,
-                                               i::Object** argv[],
+                                               i::Handle<i::Object> argv[],
                                                bool* has_pending_exception) {
   i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@@ -1805,10 +1811,10 @@
 static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                                i::Handle<i::Object> data,
                                                bool* has_pending_exception) {
-  i::Object** argv[1] = { data.location() };
+  i::Handle<i::Object> argv[] = { data };
   return CallV8HeapFunction(name,
                             i::Isolate::Current()->js_builtins_object(),
-                            1,
+                            ARRAY_SIZE(argv),
                             argv,
                             has_pending_exception);
 }
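
// The hunks above replace raw Object** location arrays with arrays of
// Handle<Object> and derive the argument count with ARRAY_SIZE, so the
// count cannot drift out of sync with the initializer. A simplified sketch
// of that idiom (V8's real ARRAY_SIZE also guards against being handed a
// pointer instead of an array):

#define SKETCH_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct SketchHandle { void* location; };  // hypothetical stand-in

static void CallWithArgs(int argc, SketchHandle argv[]) {
  (void)argc;
  (void)argv;  // would forward to the callee
}

void SketchCaller(SketchHandle a, SketchHandle b) {
  SketchHandle argv[] = { a, b };
  CallWithArgs(static_cast<int>(SKETCH_ARRAY_SIZE(argv)), argv);  // argc == 2
}
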
@@ -2628,10 +2634,11 @@
   if (obj->IsJSObject() && other->IsJSObject()) {
     return *obj == *other;
   }
-  i::Object** args[1] = { other.location() };
+  i::Handle<i::Object> args[] = { other };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result =
-      CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
+      CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
+                         &has_pending_exception);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return *result == i::Smi::FromInt(i::EQUAL);
 }
@@ -2794,7 +2801,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetElement(self, index);
+  i::Handle<i::Object> result = i::Object::GetElement(self, index);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
   return Utils::ToLocal(result);
@@ -2874,8 +2881,10 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  bool threw = false;
   i::Handle<i::FixedArray> value =
-      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
+      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
+  if (threw) return Local<v8::Array>();
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
   // we clone the result.
@@ -2893,8 +2902,10 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  bool threw = false;
   i::Handle<i::FixedArray> value =
-      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
+      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
+  if (threw) return Local<v8::Array>();
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
   // we clone the result.
@@ -3093,7 +3104,10 @@
   // If the property being looked up is a callback, it can throw
   // an exception.
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
+  PropertyAttributes ignored;
+  i::Handle<i::Object> result =
+      i::Object::GetProperty(receiver, receiver, lookup, name,
+                             &ignored);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
 
@@ -3110,7 +3124,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup;
+  i::LookupResult lookup(isolate);
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
   return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
@@ -3123,7 +3137,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup;
+  i::LookupResult lookup(isolate);
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
   return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
@@ -3211,21 +3225,10 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::ALLOW_CREATION));
-  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
-  EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> obj = i::SetProperty(
-      hidden_props,
-      key_obj,
-      value_obj,
-      static_cast<PropertyAttributes>(None),
-      i::kNonStrictMode);
-  has_pending_exception = obj.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, false);
-  return true;
+  i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
+  return *result == *self;
 }
 
 
@@ -3235,20 +3238,9 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::OMIT_CREATION));
-  if (hidden_props->IsUndefined()) {
-    return v8::Local<v8::Value>();
-  }
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
-  has_pending_exception = result.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
-  if (result->IsUndefined()) {
-    return v8::Local<v8::Value>();
-  }
+  i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
+  if (result->IsUndefined()) return v8::Local<v8::Value>();
   return Utils::ToLocal(result);
 }
 
@@ -3259,15 +3251,9 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::OMIT_CREATION));
-  if (hidden_props->IsUndefined()) {
-    return true;
-  }
-  i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  return i::DeleteProperty(js_obj, key_obj)->IsTrue();
+  self->DeleteHiddenProperty(*key_obj);
+  return true;
 }
 
 
@@ -3317,22 +3303,12 @@
   i::Handle<i::ExternalArray> array =
       isolate->factory()->NewExternalArray(length, array_type, data);
 
-  // If the object already has external elements, create a new, unique
-  // map if the element type is now changing, because assumptions about
-  // generated code based on the receiver's map will be invalid.
-  i::Handle<i::HeapObject> elements(object->elements());
-  bool cant_reuse_map =
-      elements->map()->IsUndefined() ||
-      !elements->map()->has_external_array_elements() ||
-      elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
-  if (cant_reuse_map) {
-    i::Handle<i::Map> external_array_map =
-        isolate->factory()->GetElementsTransitionMap(
-            i::Handle<i::Map>(object->map()),
-            GetElementsKindFromExternalArrayType(array_type),
-            object->HasFastProperties());
-    object->set_map(*external_array_map);
-  }
+  i::Handle<i::Map> external_array_map =
+      isolate->factory()->GetElementsTransitionMap(
+          object,
+          GetElementsKindFromExternalArrayType(array_type));
+
+  object->set_map(*external_array_map);
   object->set_elements(*array);
 }
 
@@ -3491,7 +3467,8 @@
 }
 
 
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+                                        int argc,
                                         v8::Handle<v8::Value> argv[]) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@@ -3502,7 +3479,7 @@
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
   i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
   if (obj->IsJSFunction()) {
     fun = i::Handle<i::JSFunction>::cast(obj);
@@ -3532,7 +3509,7 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
   if (obj->IsJSFunction()) {
     i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
     EXCEPTION_PREAMBLE(isolate);
@@ -3574,7 +3551,7 @@
   HandleScope scope;
   i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> returned =
       i::Execution::New(function, argc, args, &has_pending_exception);
@@ -3595,7 +3572,7 @@
     i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
     i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
     STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-    i::Object*** args = reinterpret_cast<i::Object***>(argv);
+    i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
@@ -3649,6 +3626,23 @@
 }
 
 
+int Function::GetScriptColumnNumber() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (func->shared()->script()->IsScript()) {
+    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    return i::GetScriptColumnNumber(script, func->shared()->start_position());
+  }
+  return kLineOffsetNotFound;
+}
+
+Handle<Value> Function::GetScriptId() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (!func->shared()->script()->IsScript())
+    return v8::Undefined();
+  i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+  return Utils::ToLocal(i::Handle<i::Object>(script->id()));
+}
+
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
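
// Usage sketch for the two accessors added above (they are new public API
// in this merge; requires an entered context):

#include <v8.h>

void ScriptInfoSketch(v8::Handle<v8::Function> fn) {
  int column = fn->GetScriptColumnNumber();      // kLineOffsetNotFound if none
  v8::Handle<v8::Value> id = fn->GetScriptId();  // Undefined if no script
  (void)column;
  (void)id;
}
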
@@ -3671,13 +3665,30 @@
   if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
   LOG_API(isolate, "String::WriteUtf8");
   ENTER_V8(isolate);
-  i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  if (str->IsAsciiRepresentation()) {
+    int len;
+    if (capacity == -1) {
+      capacity = str->length() + 1;
+      len = str->length();
+    } else {
+      len = i::Min(capacity, str->length());
+    }
+    i::String::WriteToFlat(*str, buffer, 0, len);
+    if (nchars_ref != NULL) *nchars_ref = len;
+    if (!(options & NO_NULL_TERMINATION) && capacity > len) {
+      buffer[len] = '\0';
+      return len + 1;
+    }
+    return len;
+  }
+
+  i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
   isolate->string_tracker()->RecordWrite(str);
   if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
     // using StringInputBuffer or Get(i) to access the characters.
-    str->TryFlatten();
+    FlattenString(str);
   }
   write_input_buffer.Reset(0, *str);
   int len = str->length();
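
// A minimal sketch of the capacity/termination contract the new ASCII fast
// path implements (the NO_NULL_TERMINATION option is omitted here): copy at
// most `capacity` bytes, report the characters written via nchars, and
// NUL-terminate, counting the terminator in the return value, only when
// there is room:

#include <algorithm>
#include <cstring>

int WriteAsciiSketch(const char* src, int src_len,
                     char* buffer, int capacity,
                     int* nchars) {
  int len = (capacity == -1) ? src_len : std::min(capacity, src_len);
  std::memcpy(buffer, src, len);
  if (nchars != NULL) *nchars = len;
  if (capacity == -1 || capacity > len) {
    buffer[len] = '\0';
    return len + 1;  // terminator counted, matching WriteUtf8's return value
  }
  return len;
}
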
@@ -3806,10 +3817,11 @@
 void v8::String::VerifyExternalStringResource(
     v8::String::ExternalStringResource* value) const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  v8::String::ExternalStringResource* expected;
+  const v8::String::ExternalStringResource* expected;
   if (i::StringShape(*str).IsExternalTwoByte()) {
-    void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
-    expected = reinterpret_cast<ExternalStringResource*>(resource);
+    const void* resource =
+        i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    expected = reinterpret_cast<const ExternalStringResource*>(resource);
   } else {
     expected = NULL;
   }
@@ -3817,7 +3829,7 @@
 }
 
 
-v8::String::ExternalAsciiStringResource*
+const v8::String::ExternalAsciiStringResource*
       v8::String::GetExternalAsciiStringResource() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(),
@@ -3825,8 +3837,9 @@
     return NULL;
   }
   if (i::StringShape(*str).IsExternalAscii()) {
-    void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
-    return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+    const void* resource =
+        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
   } else {
     return NULL;
   }
@@ -3996,6 +4009,15 @@
 
 
 void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
+  if (!i::Isolate::Current()->IsInitialized()) {
+    // Isolate is uninitialized, thus the heap is not configured yet.
+    heap_statistics->set_total_heap_size(0);
+    heap_statistics->set_total_heap_size_executable(0);
+    heap_statistics->set_used_heap_size(0);
+    heap_statistics->set_heap_size_limit(0);
+    return;
+  }
+
   i::Heap* heap = i::Isolate::Current()->heap();
   heap_statistics->set_total_heap_size(heap->CommittedMemory());
   heap_statistics->set_total_heap_size_executable(
@@ -4008,15 +4030,16 @@
 bool v8::V8::IdleNotification() {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
-  if (!i::Isolate::Current()->IsInitialized()) return true;
+  i::Isolate* isolate = i::Isolate::Current();
+  if (isolate == NULL || !isolate->IsInitialized()) return true;
   return i::V8::IdleNotification();
 }
 
 
 void v8::V8::LowMemoryNotification() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (!isolate->IsInitialized()) return;
-  isolate->heap()->CollectAllGarbage(true);
+  if (isolate == NULL || !isolate->IsInitialized()) return;
+  isolate->heap()->CollectAllAvailableGarbage();
 }
 
 
@@ -4110,8 +4133,9 @@
   }
   // Leave V8.
 
-  if (env.is_null())
+  if (env.is_null()) {
     return Persistent<Context>();
+  }
   return Persistent<Context>(Utils::ToLocal(env));
 }
 
@@ -4299,7 +4323,7 @@
 }
 
 static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
+  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
 }
 
 
@@ -4325,7 +4349,7 @@
   if (value->IsSmi()) {
     return i::Internals::GetExternalPointerFromSmi(value);
   } else if (value->IsForeign()) {
-    return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
+    return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
   } else {
     return NULL;
   }
@@ -4535,15 +4559,13 @@
 
 
 bool v8::String::CanMakeExternal() {
+  if (!internal::FLAG_clever_optimizations) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
   if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
-  if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
-    return false;
-  }
+  if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
   int size = obj->Size();  // Byte size of the original string.
-  if (size < i::ExternalString::kSize)
-    return false;
+  if (size < i::ExternalString::kShortSize) return false;
   i::StringShape shape(*obj);
   return !shape.IsExternal();
 }
@@ -4877,7 +4899,7 @@
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
     i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
-    if (callback_obj->address() == FUNCTION_ADDR(that)) {
+    if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
   }
@@ -5487,6 +5509,12 @@
                                                        wait_for_connection);
 }
 
+
+void Debug::DisableAgent() {
+  return i::Isolate::Current()->debugger()->StopAgent();
+}
+
+
 void Debug::ProcessDebugMessages() {
   i::Execution::ProcessDebugMesssages(true);
 }
diff --git a/src/api.h b/src/api.h
index 07723cb..a825dd7 100644
--- a/src/api.h
+++ b/src/api.h
@@ -112,15 +112,16 @@
 }
 
 
-template <typename T> static inline T ToCData(v8::internal::Object* obj) {
+template <typename T> inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
+      reinterpret_cast<intptr_t>(
+          v8::internal::Foreign::cast(obj)->foreign_address()));
 }
 
 
 template <typename T>
-static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return FACTORY->NewForeign(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -136,10 +137,6 @@
 };
 
 
-enum ExtensionTraversalState {
-  UNVISITED, VISITED, INSTALLED
-};
-
 
 class RegisteredExtension {
  public:
@@ -148,14 +145,11 @@
   Extension* extension() { return extension_; }
   RegisteredExtension* next() { return next_; }
   RegisteredExtension* next_auto() { return next_auto_; }
-  ExtensionTraversalState state() { return state_; }
-  void set_state(ExtensionTraversalState value) { state_ = value; }
   static RegisteredExtension* first_extension() { return first_extension_; }
  private:
   Extension* extension_;
   RegisteredExtension* next_;
   RegisteredExtension* next_auto_;
-  ExtensionTraversalState state_;
   static RegisteredExtension* first_extension_;
 };
 
@@ -242,7 +236,7 @@
 
 
 template <class T>
-static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
   return reinterpret_cast<T*>(obj.location());
 }
 
@@ -483,7 +477,7 @@
 };
 
 
-static const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
+const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
 
 
 void HandleScopeImplementer::SaveContext(Context* context) {
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 5ad7b5a..2ec6c7c 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -71,7 +71,9 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
 }
 
@@ -81,9 +83,14 @@
 }
 
 
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -105,9 +112,15 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -134,10 +147,17 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because the cell can never
+    // be on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -154,6 +174,11 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -202,13 +227,13 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -228,13 +253,13 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
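
// Every RelocInfo setter above now pairs the pointer store with a
// notification to the incremental marker. A generic sketch of that
// insertion-barrier shape (illustrative only; V8's RecordWrite also
// filters on page flags and handles code slots specially):

enum SketchColor { WHITE, GREY, BLACK };

struct SketchObj {
  SketchColor color;
};

struct SketchMarker {
  void Push(SketchObj* obj) {
    (void)obj;  // enqueue on the marking deque (elided)
  }
  // Called after "host now references value": if a fully scanned (black)
  // object gains an edge to an unmarked (white) one, re-grey the target so
  // concurrent marking cannot miss it.
  void RecordWrite(SketchObj* host, SketchObj* value) {
    if (host->color == BLACK && value->color == WHITE) {
      value->color = GREY;
      Push(value);
    }
  }
};
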
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 0ec3692..329493a 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -78,7 +78,9 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+                                CpuFeaturesImpliedByCompiler());
+  ASSERT(supported_ == 0 || supported_ == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -86,8 +88,7 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
   // snapshot.
-  supported_ |= OS::CpuFeaturesImpliedByPlatform();
-  supported_ |= CpuFeaturesImpliedByCompiler();
+  supported_ |= standard_features;
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -2505,7 +2506,8 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2537,7 +2539,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index eeadaca..e88739e 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -301,10 +301,10 @@
 const DwVfpRegister d15 = { 15 };
 
 // Aliases for double registers.
-const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
-const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
-const DwVfpRegister kDoubleRegZero = d14;
-const DwVfpRegister kScratchDoubleReg = d15;
+static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
+static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
+static const DwVfpRegister& kDoubleRegZero = d14;
+static const DwVfpRegister& kScratchDoubleReg = d15;
 
 
 // Coprocessor register
@@ -1207,6 +1207,10 @@
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   // Read/patch instructions
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
@@ -1261,12 +1265,6 @@
 
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
-  // Read/patch instructions
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
-
   // Decode branch instruction at pos and return branch target pos
   int target_at(int pos);
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ae8cb56..d0136f5 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -86,12 +86,6 @@
 }
 
 
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. An elements backing store is allocated with size initial_capacity
 // and filled with the hole values.
@@ -101,16 +95,19 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity > 0);
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
   // Load the initial map from the array function.
   __ ldr(scratch1, FieldMemOperand(array_function,
                                    JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
-  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
   __ AllocateInNewSpace(size,
                         result,
                         scratch2,
@@ -130,6 +127,11 @@
   __ mov(scratch3,  Operand(0, RelocInfo::NONE));
   __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
+  if (initial_capacity == 0) {
+    __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+    return;
+  }
+
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
@@ -138,7 +140,6 @@
   __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
-  STATIC_ASSERT(kSmiTag == 0);
   __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
 
   // Initialize the FixedArray and fill it with holes. FixedArray length is
@@ -147,18 +148,29 @@
   // scratch1: elements array (untagged)
   // scratch2: start of next object
   __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
   __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
   __ mov(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
   __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
 
-  // Fill the FixedArray with the hole value.
-  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
   __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < initial_capacity; i++) {
+  static const int kLoopUnfoldLimit = 4;
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    for (int i = 0; i < initial_capacity; i++) {
+      __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+    }
+  } else {
+    Label loop, entry;
+    __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+    __ b(&entry);
+    __ bind(&loop);
     __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+    __ bind(&entry);
+    __ cmp(scratch1, scratch2);
+    __ b(lt, &loop);
   }
 }
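
// The generator above unrolls the hole-fill only for small capacities and
// emits a compare-and-branch loop otherwise. The decision happens at code
// generation time; a sketch with a hypothetical emitter:

struct SketchEmitter {
  static const int kLoopUnfoldLimit = 4;

  void EmitStoreHole() { /* one post-indexed str in the real code */ }
  void EmitFillLoop(int count) { (void)count; /* cmp/b loop */ }

  void EmitFill(int initial_capacity) {
    if (initial_capacity <= kLoopUnfoldLimit) {
      for (int i = 0; i < initial_capacity; i++) EmitStoreHole();  // unrolled
    } else {
      EmitFillLoop(initial_capacity);  // constant-bounded runtime loop
    }
  }
};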
 
@@ -173,7 +185,7 @@
 // register elements_array_storage is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array_storage,
                             Register elements_array_end,
@@ -181,32 +193,18 @@
                             Register scratch2,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
-
   // Load the initial map from the array function.
   __ ldr(elements_array_storage,
          FieldMemOperand(array_function,
                          JSFunction::kPrototypeOrInitialMapOffset));
 
-  // Check whether an empty sized array is requested.
-  __ tst(array_size, array_size);
-  __ b(ne, &not_empty);
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize +
-             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch1,
-                        gc_required,
-                        TAG_OBJECT);
-  __ jmp(&allocated);
+  if (FLAG_debug_code) {  // Assert that array size is not zero.
+    __ tst(array_size, array_size);
+    __ Assert(ne, "array size is unexpectedly 0");
+  }
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
-  __ bind(&not_empty);
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ mov(elements_array_end,
          Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
@@ -226,7 +224,6 @@
   // result: JSObject
   // elements_array_storage: initial map
   // array_size: size of array (smi)
-  __ bind(&allocated);
   __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
   __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
   __ str(elements_array_storage,
@@ -256,14 +253,6 @@
   ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
   STATIC_ASSERT(kSmiTag == 0);
-  __ tst(array_size, array_size);
-  // Length of the FixedArray is the number of pre-allocated elements if
-  // the actual JSArray has length 0 and the size of the JSArray for non-empty
-  // JSArrays. The length of a FixedArray is stored as a smi.
-  __ mov(array_size,
-         Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
-         LeaveCC,
-         eq);
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ str(array_size,
          MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -311,20 +300,20 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more;
+  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
 
   // Check for array construction with zero arguments or one.
   __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(ne, &argc_one_or_more);
 
   // Handle construction of an empty array.
+  __ bind(&empty_array);
   AllocateEmptyJSArray(masm,
                        r1,
                        r2,
                        r3,
                        r4,
                        r5,
-                       JSArray::kPreallocatedArrayElements,
                        call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
   // Setup return value, remove receiver from stack and return.
@@ -339,6 +328,13 @@
   __ b(ne, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
+  __ tst(r2, r2);
+  __ b(ne, &not_empty_array);
+  __ Drop(1);  // Adjust stack.
+  __ mov(r0, Operand(0));  // Treat this as a call with argc of zero.
+  __ b(&empty_array);
+
+  __ bind(&not_empty_array);
   __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
   __ b(ne, call_generic_code);
 
@@ -582,10 +578,11 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
-  __ EnterInternalFrame();
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
   __ pop(function);
   __ mov(argument, r0);
   __ b(&argument_is_string);
@@ -601,10 +598,11 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
-  __ EnterInternalFrame();
-  __ push(argument);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ Ret();
 }
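
// The hunks above replace explicit EnterInternalFrame/LeaveInternalFrame
// pairs with a block-scoped FrameScope, so frame teardown is emitted when
// the scope closes and cannot be forgotten. A sketch of the RAII shape,
// with a hypothetical ToyMasm standing in for MacroAssembler:

struct ToyMasm {
  void EnterFrame(int type) { (void)type; /* emits frame setup */ }
  void LeaveFrame(int type) { (void)type; /* emits frame teardown */ }
};

class ToyFrameScope {
 public:
  ToyFrameScope(ToyMasm* masm, int type) : masm_(masm), type_(type) {
    masm_->EnterFrame(type_);
  }
  ~ToyFrameScope() { masm_->LeaveFrame(type_); }  // runs at scope exit
 private:
  ToyMasm* masm_;
  int type_;
};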
 
@@ -617,12 +615,12 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that the function is not a smi.
   __ JumpIfSmi(r1, &non_function_call);
   // Check that the function is a JSFunction.
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &non_function_call);
+  __ b(ne, &slow);
 
   // Jump to the function-specific construct stub.
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -631,10 +629,19 @@
 
   // r0: number of arguments
   // r1: called object
+  // r2: object type
+  Label do_call;
+  __ bind(&slow);
+  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(ne, &non_function_call);
+  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ SetCallKind(r5, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
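
// The construct stub above now distinguishes three callee kinds instead of
// two. A plain-C++ sketch of the dispatch it implements (names
// illustrative):

enum CalleeKind { PLAIN_FUNCTION, FUNCTION_PROXY, NON_FUNCTION };

const char* ConstructPathFor(CalleeKind kind) {
  switch (kind) {
    case PLAIN_FUNCTION: return "shared function info's construct stub";
    case FUNCTION_PROXY: return "CALL_FUNCTION_PROXY_AS_CONSTRUCTOR builtin";
    default:             return "CALL_NON_FUNCTION_AS_CONSTRUCTOR builtin";
  }
}
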
@@ -650,321 +657,329 @@
   Isolate* isolate = masm->isolate();
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Preserve the two incoming parameters on the stack.
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  __ push(r0);  // Smi-tagged arguments count.
-  __ push(r1);  // Constructor function.
+    // Preserve the two incoming parameters on the stack.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ push(r0);  // Smi-tagged arguments count.
+    __ push(r1);  // Constructor function.
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(isolate);
-    __ mov(r2, Operand(debug_step_in_fp));
-    __ ldr(r2, MemOperand(r2));
-    __ tst(r2, r2);
-    __ b(ne, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ mov(r2, Operand(debug_step_in_fp));
+      __ ldr(r2, MemOperand(r2));
+      __ tst(r2, r2);
+      __ b(ne, &rt_call);
 #endif
 
-    // Load the initial map and verify that it is in fact a map.
-    // r1: constructor function
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ JumpIfSmi(r2, &rt_call);
-    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-    __ b(ne, &rt_call);
+      // Load the initial map and verify that it is in fact a map.
+      // r1: constructor function
+      __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(r2, &rt_call);
+      __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+      __ b(ne, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // r1: constructor function
-    // r2: initial map
-    __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
-    __ b(eq, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // r1: constructor function
+      // r2: initial map
+      __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+      __ b(eq, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-      MemOperand constructor_count =
-          FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
-      __ ldrb(r4, constructor_count);
-      __ sub(r4, r4, Operand(1), SetCC);
-      __ strb(r4, constructor_count);
-      __ b(ne, &allocate);
-
-      __ Push(r1, r2);
-
-      __ push(r1);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(r2);
-      __ pop(r1);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // r1: constructor function
-    // r2: initial map
-    __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
-    // Allocated the JSObject, now initialize the fields. Map is set to initial
-    // map and properties and elements are set to empty fixed array.
-    // r1: constructor function
-    // r2: initial map
-    // r3: object size
-    // r4: JSObject (not tagged)
-    __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(r5, r4);
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
-    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
-    // Fill all the in-object properties with the appropriate filler.
-    // r1: constructor function
-    // r2: initial map
-    // r3: object size (in words)
-    // r4: JSObject (not tagged)
-    // r5: First in-object property of JSObject (not tagged)
-    __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    { Label loop, entry;
       if (count_constructions) {
+        Label allocate;
+        // Decrease generous allocation count.
+        __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+        MemOperand constructor_count =
+            FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+        __ ldrb(r4, constructor_count);
+        __ sub(r4, r4, Operand(1), SetCC);
+        __ strb(r4, constructor_count);
+        __ b(ne, &allocate);
+
+        __ Push(r1, r2);
+
+        __ push(r1);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(r2);
+        __ pop(r1);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // r1: constructor function
+      // r2: initial map
+      __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+      __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject; now initialize the fields. Map is set to the
+      // initial map; properties and elements are set to the empty fixed array.
+      // r1: constructor function
+      // r2: initial map
+      // r3: object size
+      // r4: JSObject (not tagged)
+      __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(r5, r4);
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+      // Fill all the in-object properties with the appropriate filler.
+      // r1: constructor function
+      // r2: initial map
+      // r3: object size (in words)
+      // r4: JSObject (not tagged)
+      // r5: First in-object property of JSObject (not tagged)
+      __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+      if (count_constructions) {
+        __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+        __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+                kBitsPerByte);
+        __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
+        // r0: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmp(r0, r6);
+          __ Assert(le, "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(r5, r0, r7);
         // To allow for truncation.
         __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
       }
-      __ b(&entry);
-      __ bind(&loop);
-      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
-      __ bind(&entry);
-      __ cmp(r5, r6);
-      __ b(lt, &loop);
+      __ InitializeFieldsWithFiller(r5, r6, r7);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ add(r4, r4, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+      // r1: constructor function
+      // r4: JSObject
+      // r5: start of next object (not tagged)
+      __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+      // The field instance sizes contains both pre-allocated property fields
+      // and in-object properties.
+      __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+      __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+              kBitsPerByte);
+      __ add(r3, r3, Operand(r6));
+      __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
+              kBitsPerByte);
+      __ sub(r3, r3, Operand(r6), SetCC);
+
+      // Done if no extra properties are to be allocated.
+      __ b(eq, &allocated);
+      __ Assert(pl, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // r1: constructor
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: start of next object
+      __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ AllocateInNewSpace(
+          r0,
+          r5,
+          r6,
+          r2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // r1: constructor
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+      __ mov(r2, r5);
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+      __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+      __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+      // Initialize the fields to undefined.
+      // r1: constructor function
+      // r2: First element of FixedArray (not tagged)
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+          __ cmp(r7, r8);
+          __ Assert(eq, "Undefined value not loaded.");
+        }
+        __ b(&entry);
+        __ bind(&loop);
+        __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+        __ bind(&entry);
+        __ cmp(r2, r6);
+        __ b(lt, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // r1: constructor function
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated
+      // r1: constructor function
+      // r4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // r4: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(r4, r5);
     }
 
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    __ add(r4, r4, Operand(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed. Continue with allocated
-    // object if not fall through to runtime call if it is.
+    // Allocate the new receiver object using the runtime call.
     // r1: constructor function
+    __ bind(&rt_call);
+    __ push(r1);  // argument for Runtime_NewObject
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(r4, r0);
+
+    // Receiver for constructor call allocated.
     // r4: JSObject
-    // r5: start of next object (not tagged)
-    __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
-    // The field instance sizes contains both pre-allocated property fields and
-    // in-object properties.
-    __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
-    __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
-    __ add(r3, r3, Operand(r6));
-    __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
-    __ sub(r3, r3, Operand(r6), SetCC);
+    __ bind(&allocated);
+    __ push(r4);
 
-    // Done if no extra properties are to be allocated.
-    __ b(eq, &allocated);
-    __ Assert(pl, "Property allocation count failed.");
+    // Push the function and the allocated receiver from the stack.
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(r1, MemOperand(sp, kPointerSize));
+    __ push(r1);  // Constructor function.
+    __ push(r4);  // Receiver.
 
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // r1: constructor
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: start of next object
-    __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
-        r0,
-        r5,
-        r6,
-        r2,
-        &undo_allocation,
-        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
-    // Initialize the FixedArray.
-    // r1: constructor
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
-    __ mov(r2, r5);
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
-    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-    __ mov(r0, Operand(r3, LSL, kSmiTagSize));
-    __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
-    // Initialize the fields to undefined.
+    // Reload the number of arguments from the stack.
     // r1: constructor function
-    // r2: First element of FixedArray (not tagged)
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-    { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
-        __ cmp(r7, r8);
-        __ Assert(eq, "Undefined value not loaded.");
-      }
-      __ b(&entry);
-      __ bind(&loop);
-      __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
-      __ bind(&entry);
-      __ cmp(r2, r6);
-      __ b(lt, &loop);
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+    // Set up a pointer to the last argument.
+    __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Set up the number of arguments for the function call below.
+    __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+    // Copy arguments and receiver to the expression stack.
+    // r0: number of arguments
+    // r2: address of last argument (caller sp)
+    // r1: constructor function
+    // r3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+    __ push(ip);
+    __ bind(&entry);
+    __ sub(r3, r3, Operand(2), SetCC);
+    __ b(ge, &loop);
+
+    // Call the function.
+    // r0: number of arguments
+    // r1: constructor function
+    if (is_api_function) {
+      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected,
+                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(r0);
+      __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // r1: constructor function
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+    // Pop the function from the stack.
+    // sp[0]: constructor function
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    __ pop();
 
-    // Continue with JSObject being successfully allocated
-    // r1: constructor function
-    // r4: JSObject
-    __ jmp(&allocated);
+    // Restore context from the frame.
+    // r0: result
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // r4: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(r4, r5);
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // r0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(r0, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ ldr(r0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // r0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  // r1: constructor function
-  __ bind(&rt_call);
-  __ push(r1);  // argument for Runtime_NewObject
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(r4, r0);
-
-  // Receiver for constructor call allocated.
-  // r4: JSObject
-  __ bind(&allocated);
-  __ push(r4);
-
-  // Push the function and the allocated receiver from the stack.
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(r1, MemOperand(sp, kPointerSize));
-  __ push(r1);  // Constructor function.
-  __ push(r4);  // Receiver.
-
-  // Reload the number of arguments from the stack.
-  // r1: constructor function
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
-  // Setup pointer to last argument.
-  __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Setup number of arguments for function call below
-  __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
-  // Copy arguments and receiver to the expression stack.
-  // r0: number of arguments
-  // r2: address of last argument (caller sp)
-  // r1: constructor function
-  // r3: number of arguments (smi-tagged)
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  Label loop, entry;
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
-  __ push(ip);
-  __ bind(&entry);
-  __ sub(r3, r3, Operand(2), SetCC);
-  __ b(ge, &loop);
-
-  // Call the function.
-  // r0: number of arguments
-  // r1: constructor function
-  if (is_api_function) {
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Pop the function from the stack.
-  // sp[0]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ pop();
-
-  // Restore context from the frame.
-  // r0: result
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  // r0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ JumpIfSmi(r0, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ ldr(r0, MemOperand(sp));
-
-  // Remove receiver from the stack, remove caller arguments, and
-  // return.
-  __ bind(&exit);
-  // r0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ LeaveConstructFrame();
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
   __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
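The use_receiver/exit logic above implements the ECMA-262 construct rule:
an object-valued return from a constructor replaces the freshly allocated
receiver, while a primitive return is discarded. A minimal JavaScript
illustration of the semantics (language behavior, not V8 internals):

    function F() { return {a: 1}; }
    new F().a;             // 1: the returned object replaces the receiver
    function G() { return 42; }
    new G() instanceof G;  // true: the primitive is ignored and the
                           // allocated receiver is returned instead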
@@ -997,60 +1012,64 @@
   // r4: argv
   // r5-r7, cp may be clobbered
 
-  // Clear the context before we push it when entering the JS frame.
+  // Clear the context before we push it when entering the internal frame.
   __ mov(cp, Operand(0, RelocInfo::NONE));
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Set up the context from the function argument.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    // Set up the context from the function argument.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-  __ InitializeRootRegister();
+    // Set up the roots register.
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(masm->isolate());
+    __ mov(r10, Operand(roots_array_start));
 
-  // Push the function and the receiver onto the stack.
-  __ push(r1);
-  __ push(r2);
+    // Push the function and the receiver onto the stack.
+    __ push(r1);
+    __ push(r2);
 
-  // Copy arguments to the stack in a loop.
-  // r1: function
-  // r3: argc
-  // r4: argv, i.e. points to first arg
-  Label loop, entry;
-  __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
-  // r2 points past last arg.
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
-  __ ldr(r0, MemOperand(r0));  // dereference handle
-  __ push(r0);  // push parameter
-  __ bind(&entry);
-  __ cmp(r4, r2);
-  __ b(ne, &loop);
+    // Copy arguments to the stack in a loop.
+    // r1: function
+    // r3: argc
+    // r4: argv, i.e. points to first arg
+    Label loop, entry;
+    __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+    // r2 points past last arg.
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
+    __ ldr(r0, MemOperand(r0));  // dereference handle
+    __ push(r0);  // push parameter
+    __ bind(&entry);
+    __ cmp(r4, r2);
+    __ b(ne, &loop);
 
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-  __ mov(r5, Operand(r4));
-  __ mov(r6, Operand(r4));
-  __ mov(r7, Operand(r4));
-  if (kR9Available == 1) {
-    __ mov(r9, Operand(r4));
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ mov(r5, Operand(r4));
+    __ mov(r6, Operand(r4));
+    __ mov(r7, Operand(r4));
+    if (kR9Available == 1) {
+      __ mov(r9, Operand(r4));
+    }
+
+    // Invoke the code and pass argc as r0.
+    __ mov(r0, Operand(r3));
+    if (is_construct) {
+      __ Call(masm->isolate()->builtins()->JSConstructCall());
+    } else {
+      ParameterCount actual(r0);
+      __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+    // Respect ABI stack constraint.
   }
-
-  // Invoke the code and pass argc as r0.
-  __ mov(r0, Operand(r3));
-  if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
-  } else {
-    ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Exit the JS frame and remove the parameters (except function), and return.
-  // Respect ABI stack constraint.
-  __ LeaveInternalFrame();
   __ Jump(lr);
 
   // r0: result
@@ -1069,26 +1088,27 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(r1);
-  // Push call kind information.
-  __ push(r5);
+    // Preserve the function.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(r1);
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  // Calculate the entry point.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(r1);
+    __ CallRuntime(Runtime::kLazyCompile, 1);
+    // Calculate the entry point.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(r5);
-  // Restore saved function.
-  __ pop(r1);
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore saved function.
+    __ pop(r1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1097,26 +1117,27 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(r1);
-  // Push call kind information.
-  __ push(r5);
+    // Preserve the function.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(r1);
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
-  // Calculate the entry point.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(r1);
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
+    // Calculate the entry point.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(r5);
-  // Restore saved function.
-  __ pop(r1);
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore saved function.
+    __ pop(r1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1125,12 +1146,13 @@
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  __ EnterInternalFrame();
-  // Pass the function and deoptimization type to the runtime system.
-  __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
-  __ push(r0);
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(r0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
 
   // Get the full codegen state from the stack and untag it -> r6.
   __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1170,9 +1192,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
   __ Ret();
 }
@@ -1188,10 +1211,11 @@
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ EnterInternalFrame();
-  __ push(r0);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1273,17 +1297,23 @@
     __ b(ge, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
-    __ push(r0);
 
-    __ push(r2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(r2, r0);
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
+      __ push(r0);
 
-    __ pop(r0);
-    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-    __ LeaveInternalFrame();
+      __ push(r2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(r2, r0);
+
+      __ pop(r0);
+      __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+      // Exit the internal frame.
+    }
+
     // Restore the function to r1, and the flag to r4.
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
     __ mov(r4, Operand(0, RelocInfo::NONE));
@@ -1403,156 +1433,157 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-  __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
-  __ push(r0);
-  __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
+    __ push(r0);
+    __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
-  // Make r2 the space we have left. The stack might already be overflowed
-  // here which will cause r2 to become negative.
-  __ sub(r2, sp, r2);
-  // Check if the arguments will overflow the stack.
-  __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ b(gt, &okay);  // Signed comparison.
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+    // Make r2 the space we have left. The stack might already be overflowed
+    // here, which will cause r2 to become negative.
+    __ sub(r2, sp, r2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ b(gt, &okay);  // Signed comparison.
 
-  // Out of stack space.
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ push(r1);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  // End of stack check.
+    // Out of stack space.
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ push(r1);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
 
-  // Push current limit and index.
-  __ bind(&okay);
-  __ push(r0);  // limit
-  __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
-  __ push(r1);
+    // Push current limit and index.
+    __ bind(&okay);
+    __ push(r0);  // limit
+    __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
+    __ push(r1);
 
-  // Get the receiver.
-  __ ldr(r0, MemOperand(fp, kRecvOffset));
+    // Get the receiver.
+    __ ldr(r0, MemOperand(fp, kRecvOffset));
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &push_receiver);
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(ne, &push_receiver);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in r1.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in r1.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
 
-  // Compute the receiver.
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                           kSmiTagSize)));
-  __ b(ne, &push_receiver);
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                             kSmiTagSize)));
+    __ b(ne, &push_receiver);
 
-  // Do not transform the receiver for strict mode functions.
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ b(ne, &push_receiver);
+    // Do not transform the receiver for strict mode functions.
+    __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ b(ne, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(r0, &call_to_object);
-  __ LoadRoot(r1, Heap::kNullValueRootIndex);
-  __ cmp(r0, r1);
-  __ b(eq, &use_global_receiver);
-  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, r1);
-  __ b(eq, &use_global_receiver);
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(r0, &call_to_object);
+    __ LoadRoot(r1, Heap::kNullValueRootIndex);
+    __ cmp(r0, r1);
+    __ b(eq, &use_global_receiver);
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ cmp(r0, r1);
+    __ b(eq, &use_global_receiver);
 
-  // Check if the receiver is already a JavaScript object.
-  // r0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &push_receiver);
+    // Check if the receiver is already a JavaScript object.
+    // r0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &push_receiver);
 
-  // Convert the receiver to a regular object.
-  // r0: receiver
-  __ bind(&call_to_object);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ b(&push_receiver);
+    // Convert the receiver to a regular object.
+    // r0: receiver
+    __ bind(&call_to_object);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ b(&push_receiver);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
-  __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+    __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  // r0: receiver
-  __ bind(&push_receiver);
-  __ push(r0);
+    // Push the receiver.
+    // r0: receiver
+    __ bind(&push_receiver);
+    __ push(r0);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ ldr(r0, MemOperand(fp, kIndexOffset));
-  __ b(&entry);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ ldr(r0, MemOperand(fp, kIndexOffset));
+    __ b(&entry);
 
-  // Load the current argument from the arguments array and push it to the
-  // stack.
-  // r0: current argument index
-  __ bind(&loop);
-  __ ldr(r1, MemOperand(fp, kArgsOffset));
-  __ push(r1);
-  __ push(r0);
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // r0: current argument index
+    __ bind(&loop);
+    __ ldr(r1, MemOperand(fp, kArgsOffset));
+    __ push(r1);
+    __ push(r0);
 
-  // Call the runtime to access the property in the arguments array.
-  __ CallRuntime(Runtime::kGetProperty, 2);
-  __ push(r0);
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(r0);
 
-  // Use inline caching to access the arguments.
-  __ ldr(r0, MemOperand(fp, kIndexOffset));
-  __ add(r0, r0, Operand(1 << kSmiTagSize));
-  __ str(r0, MemOperand(fp, kIndexOffset));
+    // Advance the index past the argument that was just copied.
+    __ ldr(r0, MemOperand(fp, kIndexOffset));
+    __ add(r0, r0, Operand(1 << kSmiTagSize));
+    __ str(r0, MemOperand(fp, kIndexOffset));
 
-  // Test if the copy loop has finished copying all the elements from the
-  // arguments object.
-  __ bind(&entry);
-  __ ldr(r1, MemOperand(fp, kLimitOffset));
-  __ cmp(r0, r1);
-  __ b(ne, &loop);
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ ldr(r1, MemOperand(fp, kLimitOffset));
+    __ cmp(r0, r1);
+    __ b(ne, &loop);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(r0);
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &call_proxy);
-  __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(r0);
+    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(ne, &call_proxy);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  // Tear down the internal frame and remove function, receiver and args.
-  __ LeaveInternalFrame();
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Jump(lr);
+    frame_scope.GenerateLeaveFrame();
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Jump(lr);
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(r1);  // add function proxy as last argument
-  __ add(r0, r0, Operand(1));
-  __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-  __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(r1);  // add function proxy as last argument
+    __ add(r0, r0, Operand(1));
+    __ mov(r2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(r5, CALL_AS_METHOD);
+    __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  __ LeaveInternalFrame();
+    // Tear down the internal frame and remove function, receiver and args.
+  }
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Jump(lr);
 }
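Generate_FunctionApply above encodes the receiver rules of
Function.prototype.apply: in classic (non-strict) mode, null and undefined
are replaced by the global receiver and primitives are boxed via ToObject,
while strict-mode and native functions get the value untransformed. A short
JavaScript sketch of the observable behavior:

    function who() { return this; }
    who.apply(null);      // classic mode: the global object
    typeof who.apply(7);  // "object": 7 is boxed by ToObject
    (function() { "use strict"; return this; }).apply(7);  // 7, untransformed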
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 36450c9..f2c0f99 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -98,9 +98,9 @@
                         &gc,
                         TAG_OBJECT);
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -189,6 +189,121 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: function.
+  // [sp + kPointerSize]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        r0, r1, r2, &gc, TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ ldr(r3, MemOperand(sp, 0));
+
+  // Load the serialized scope info from the stack.
+  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the object header.
+  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
+  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ mov(r2, Operand(Smi::FromInt(length)));
+  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+  // If this block context is nested in the global context, we get a smi
+  // sentinel instead of a function. The block context should then get the
+  // canonical empty function of the global context as its closure, which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(r3, &after_sentinel);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ cmp(r3, Operand::Zero());
+    __ Assert(eq, message);
+  }
+  __ ldr(r3, GlobalObjectOperand());
+  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
+  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
+  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
+
+  // Copy the global object from the previous context.
+  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+
+  // Initialize the rest of the slots to the hole value.
+  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < slots_; i++) {
+    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, r0);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
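FastNewBlockContextStub allocates the context record for a block that
declares block-scoped bindings (at this point still hidden behind the
--harmony flag). Each such block gets its own context with one slot per
binding, chained to the enclosing context through PREVIOUS_INDEX; for
example:

    {
      let x = 1;
      { let x = 2; }  // nested block: a separate block context
    }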
+
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  // r3: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size,
+                        r0,
+                        r1,
+                        r2,
+                        fail,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ ldr(r1, FieldMemOperand(r3, i));
+      __ str(r1, FieldMemOperand(r0, i));
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ add(r2, r0, Operand(JSArray::kSize));
+    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    ASSERT((elements_size % kPointerSize) == 0);
+    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+  }
+}
+
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -196,10 +311,6 @@
   // [sp + kPointerSize]: literal index.
   // [sp + (2 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into r3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -207,57 +318,61 @@
   __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r3, ip);
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
   __ b(eq, &slow_case);
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+    __ cmp(r0, ip);
+    __ b(ne, &check_fast_elements);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Ret();
+
+    __ bind(&check_fast_elements);
+    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+    __ cmp(r0, ip);
+    __ b(ne, &double_elements);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Ret();
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
     __ push(r3);
     __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
     __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
-    __ LoadRoot(ip, expected_map_index);
-    __ cmp(r3, ip);
+    __ CompareRoot(r3, expected_map_index);
     __ Assert(eq, message);
     __ pop(r3);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        r0,
-                        r1,
-                        r2,
-                        &slow_case,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ ldr(r1, FieldMemOperand(r3, i));
-      __ str(r1, FieldMemOperand(r0, i));
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
-    __ add(r2, r0, Operand(JSArray::kSize));
-    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
-  }
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
 
   // Return and remove the on-stack parameters.
   __ add(sp, sp, Operand(3 * kPointerSize));
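The reworked FastCloneShallowArrayStub dispatches on the boilerplate's
elements kind, so literals backed by double, copy-on-write, or plain fast
elements each take a specialized copy path. In JavaScript terms (which
representation a given literal actually gets is an engine decision):

    function doubles() { return [1.5, 2.5]; }  // fast double elements
    function nums() { return [1, 2, 3]; }      // COW elements may be shared
    doubles();  // each call shallow-clones the boilerplate via the stub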
@@ -268,6 +383,49 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: object literal flags.
+  // [sp + kPointerSize]: constant properties.
+  // [sp + (2 * kPointerSize)]: literal index.
+  // [sp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into r3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
+  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ b(eq, &slow_case);
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+  __ cmp(r0, Operand(size >> kPointerSizeLog2));
+  __ b(ne, &slow_case);
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ ldr(r1, FieldMemOperand(r3, i));
+    __ str(r1, FieldMemOperand(r0, i));
+  }
+
+  // Return and remove the on-stack parameters.
+  __ add(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
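FastCloneShallowObjectStub is the object-literal counterpart: when the
boilerplate has only fast, in-object properties of a statically known size,
the clone is a straight word-by-word copy of the header plus properties;
anything else falls back to Runtime::kCreateObjectLiteralShallow. Roughly:

    function make() { return {x: 1, y: 2}; }
    var a = make();  // fast path: header and in-object properties copied
    var b = make();  // a and b are distinct shallow clones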
+
+
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -838,9 +996,11 @@
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   0, 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
@@ -857,6 +1017,29 @@
 }
 
 
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+  // These variants are compiled ahead of time.  See next method.
+  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+    return true;
+  }
+  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+  stub1.GetCode()->set_is_pregenerated(true);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1197,6 +1380,8 @@
       __ vmov(d0, r0, r1);
       __ vmov(d1, r2, r3);
     }
+
+    AllowExternalCallThatCantCauseGC scope(masm);
     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                      0, 2);
     __ pop(pc);  // Return.
@@ -1214,7 +1399,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1791,8 @@
 // The stub expects its argument in the tos_ register and returns its result in
 // it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
   CpuFeatures::Scope scope(VFP3);
 
@@ -1713,6 +1900,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ stm(db_w, sp, kCallerSaved | lr.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+  }
+  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
+}
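StoreBufferOverflowStub supports the new write barrier feeding
src/store-buffer.cc (added in this merge): stores that create old-to-new
pointers are recorded in the store buffer, and this stub spills it to C++
when it fills up. A hedged JavaScript-level illustration of the kind of
store that gets recorded (whether the target is actually tenured is up to
the GC):

    var cache = {};     // assume 'cache' has been promoted to old space
    cache.latest = {};  // old-to-new store: the slot is recorded in the
                        // store buffer so the next scavenge can find it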
+
+
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
@@ -1866,12 +2088,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r1, Operand(r0));
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r1, Operand(r0));
+      __ pop(r0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +2135,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r2, r0);  // Move the new heap number into r2.
-    // Get the heap number into r0, now that the new heap number is in r2.
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r2, r0);  // Move the new heap number into r2.
+      // Get the heap number into r0, now that the new heap number is in r2.
+      __ pop(r0);
+    }
 
     // Convert the heap number in r0 to an untagged integer in r1.
     // This can't go slow-case because it's the same number we already
@@ -2028,6 +2252,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -3086,6 +3314,9 @@
     __ cmp(r3, r5);
     __ b(ne, &calculate);
     // Cache hit. Load result, cleanup and return.
+    Counters* counters = masm->isolate()->counters();
+    __ IncrementCounter(
+        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into r0.
       __ pop();
@@ -3098,6 +3329,9 @@
   }  // if (CpuFeatures::IsSupported(VFP3))
 
   __ bind(&calculate);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(
+      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     ExternalReference runtime_function =
@@ -3133,10 +3367,11 @@
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3149,14 +3384,15 @@
 
     // We return the value in d2 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ mov(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ mov(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
@@ -3173,6 +3409,7 @@
   } else {
     __ vmov(r0, r1, d2);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3182,6 +3419,10 @@
       __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
           0, 1);
       break;
+    case TranscendentalCache::TAN:
+      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+          0, 1);
+      break;
     case TranscendentalCache::LOG:
       __ CallCFunction(ExternalReference::math_log_double_function(isolate),
           0, 1);
@@ -3199,6 +3440,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
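With TAN added to the transcendental cache (and the hit/miss counters added
earlier in this file), repeated Math.tan calls on the same input can be
answered from the cache:

    Math.tan(0.5);  // cache miss: calls the C routine and stores the result
    Math.tan(0.5);  // cache hit: returns the cached result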
@@ -3268,11 +3510,14 @@
     __ push(lr);
     __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()),
-        1, 1);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_int_function(masm->isolate()),
+          1, 1);
+      __ pop(lr);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(r0, heapnumber);
@@ -3298,11 +3543,14 @@
     __ push(lr);
     __ PrepareCallCFunction(0, 2, scratch);
     __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()),
-        0, 2);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+      __ pop(lr);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(r0, heapnumber);
@@ -3319,6 +3567,37 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(r0);
 }
@@ -3430,8 +3709,7 @@
   __ b(eq, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r3, MemOperand(ip));
+  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(ip));
@@ -3469,6 +3747,7 @@
   __ sub(r6, r6, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
   // Setup argc and the builtin function in callee-saved registers.
@@ -3527,7 +3806,7 @@
   // r3: argc
   // [sp+0]: argv
 
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
 
   // Called from C, so do not pop argc and args on exit (preserve sp)
   // No need to save register-passed args
@@ -3590,31 +3869,33 @@
   __ bind(&cont);
   __ push(ip);
 
-  // Call a faked try-block that does the invoke.
-  __ bl(&invoke);
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
-  // Coming in here the fp will be invalid because the PushTryHandler below
-  // sets it to 0 to signal the existence of the JSEntry frame.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushTryHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
   // Must preserve r0-r4, r5-r7 are available.
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the bl(&invoke) above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r5, MemOperand(ip));
+  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r5, MemOperand(ip));
@@ -3708,7 +3989,7 @@
   const Register inline_site = r9;
   const Register scratch = r2;
 
-  const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
 
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
@@ -3738,7 +4019,7 @@
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -3759,7 +4040,8 @@
     __ sub(inline_site, lr, scratch);
     // Get the map location in scratch and patch it.
     __ GetRelocatedValueLocation(inline_site, scratch);
-    __ str(map, MemOperand(scratch));
+    __ ldr(scratch, MemOperand(scratch));
+    __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   }
 
   // Register mapping: r3 is object map and r4 is function prototype.
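The inline instanceof site now patches a JSGlobalPropertyCell instead of
writing the map directly into the instruction stream, which appears to keep
raw heap pointers out of the code object (friendlier to the new
incremental-marking write barrier). Usage is unchanged:

    function F() {}
    var f = new F();
    f instanceof F;  // true: the fast path compares against the cached map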
@@ -3851,10 +4133,11 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r0, r1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ cmp(r0, Operand::Zero());
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4250,10 +4533,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  sp[0]: last_match_info (expected JSArray)
@@ -4375,25 +4654,39 @@
   Label seq_string;
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  // First check for flat string.
-  __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
+  // First check for flat string.  None of the following string type tests will
+  // succeed if subject is not a string or a short external string.
+  __ and_(r1,
+          r0,
+          Operand(kIsNotStringMask |
+                  kStringRepresentationMask |
+                  kShortExternalStringMask),
+          SetCC);
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
+  // r1: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(r1, Operand(kExternalStringTag));
   __ b(lt, &cons_string);
-  __ b(eq, &runtime);
+  __ b(eq, &external_string);
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ b(ne, &runtime);
 
   // String is sliced.
   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4404,8 +4697,7 @@
   // String is a cons string, check whether it is flat.
   __ bind(&cons_string);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
-  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
-  __ cmp(r0, r1);
+  __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
   __ b(ne, &runtime);
   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   // Is first part of cons or parent of slice a flat string?
@@ -4414,7 +4706,8 @@
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
-  __ b(ne, &runtime);
+  __ b(ne, &external_string);
+
   __ bind(&seq_string);
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -4480,8 +4773,7 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data and
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4824,7 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r1, MemOperand(r1, 0));
+  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4866,25 @@
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+  __ mov(r2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      r2,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ mov(r3, last_match_info_elements);
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -4615,6 +4915,26 @@
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
+  // External string.  Short external strings have already been ruled out.
+  // r0: scratch
+  __ bind(&external_string);
+  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ tst(r0, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found");
+  }
+  __ ldr(subject,
+         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ sub(subject,
+         subject,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ jmp(&seq_string);
+
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4712,7 +5032,24 @@
 }
 
 
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+  code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  // r1 : the function to call
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -4727,16 +5064,12 @@
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
-    __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
-  // Get the function to call from the stack.
-  // function, receiver [, arguments]
-  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
   __ JumpIfSmi(r1, &non_function);
@@ -4774,7 +5107,7 @@
   __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-  __ SetCallKind(r5, CALL_AS_FUNCTION);
+  __ SetCallKind(r5, CALL_AS_METHOD);
   {
     Handle<Code> adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -4855,100 +5188,41 @@
 
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
   __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
-  __ cmp(ip, Operand(scratch_));
+  __ cmp(ip, Operand(index_));
   __ b(ls, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(eq, &flat_string);
+  __ mov(index_, Operand(index_, ASR, kSmiTagSize));
 
-  // Handle non-flat strings.
-  __ and_(result_, result_, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(result_, Operand(kExternalStringTag));
-  __ b(gt, &sliced_string);
-  __ b(eq, &call_runtime_);
+  StringCharLoadGenerator::Generate(masm,
+                                    object_,
+                                    index_,
+                                    result_,
+                                    &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
-  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(result_, Operand(ip));
-  __ b(ne, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
-  __ add(scratch_, scratch_, result_);
-  __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // Check that parent is not an external string. Go to runtime otherwise.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(ne, &call_runtime_);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ tst(result_, Operand(kStringEncodingMask));
-  __ b(ne, &ascii_string);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register. We can
-  // add without shifting since the smi tag size is the log2 of the
-  // number of bytes in a two-byte character.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
-  __ add(scratch_, object_, Operand(scratch_));
-  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
-  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
-  __ bind(&got_char_code);
   __ mov(result_, Operand(result_, LSL, kSmiTagSize));
   __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
   __ CheckMap(index_,
-              scratch_,
+              result_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
-  __ Push(object_, index_);
+  __ push(object_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4959,15 +5233,14 @@
   }
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
-  __ Move(scratch_, r0);
-  __ pop(index_);
+  __ Move(index_, r0);
   __ pop(object_);
   // Reload the instance type.
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4976,6 +5249,7 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
+  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   __ Move(result_, r0);
@@ -5012,7 +5286,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5037,7 +5312,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -5321,11 +5597,11 @@
 
     __ cmp(undefined, candidate);
     __ b(eq, not_found);
-    // Must be null (deleted entry).
+    // Must be the hole (deleted entry).
     if (FLAG_debug_code) {
-      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
       __ cmp(ip, candidate);
-      __ Assert(eq, "oddball in symbol table is not undefined or null");
+      __ Assert(eq, "oddball in symbol table is not undefined or the hole");
     }
     __ jmp(&next_probe[i]);
 
@@ -5363,11 +5639,7 @@
                                     Register hash,
                                     Register character) {
   // hash = character + (character << 10);
-  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
-  // Untag smi seed and add the character.
-  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
-  // hash += hash << 10;
-  __ add(hash, hash, Operand(hash, LSL, 10));
+  __ add(hash, character, Operand(character, LSL, 10));
   // hash ^= hash >> 6;
   __ eor(hash, hash, Operand(hash, LSR, 6));
 }
@@ -5392,12 +5664,13 @@
   // hash ^= hash >> 11;
   __ eor(hash, hash, Operand(hash, LSR, 11));
   // hash += hash << 15;
-  __ add(hash, hash, Operand(hash, LSL, 15));
+  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
 
-  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
+  uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+  __ and_(hash, hash, Operand(kHashShiftCutOffMask));
 
   // if (hash == 0) hash = 27;
-  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
+  __ mov(hash, Operand(27), LeaveCC, eq);
 }
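
A minimal C++ sketch of the new hash finalization above, assuming 32-bit
unsigned arithmetic; String::kHashShift is left as a parameter because its
value is not shown in this hunk:

    #include <cstdint>

    uint32_t HashGetHash(uint32_t hash, int hash_shift) {
      hash ^= hash >> 11;
      hash += hash << 15;
      // Keep 32 - hash_shift bits, mirroring kHashShiftCutOffMask above
      // (presumably so the value still fits once it is stored shifted left
      // by String::kHashShift).
      hash &= (1u << (32 - hash_shift)) - 1;
      if (hash == 0) hash = 27;  // never return a zero hash
      return hash;
    }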
 
 
@@ -5612,15 +5885,12 @@
     // r3: from index (untagged smi)
     // r6 (a.k.a. to): to (smi)
     // r7 (a.k.a. from): from offset (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ tst(r1, Operand(kStringRepresentationMask));
-    __ b(eq, &seq_string);
+    Label allocate_slice, sliced_string, seq_or_external_string;
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ tst(r1, Operand(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ b(eq, &runtime);
+    __ b(eq, &seq_or_external_string);
 
     __ tst(r1, Operand(kSlicedNotConsMask));
     __ b(ne, &sliced_string);
@@ -5639,8 +5909,8 @@
     __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
     __ jmp(&allocate_slice);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string.  Just move string to the correct register.
     __ mov(r5, r0);
 
     __ bind(&allocate_slice);
@@ -6366,12 +6636,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss =
       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(r1, r0);
-  __ mov(ip, Operand(Smi::FromInt(op_)));
-  __ push(ip);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r0);
+    __ mov(ip, Operand(Smi::FromInt(op_)));
+    __ push(ip);
+    __ CallExternalReference(miss, 3);
+  }
   // Compute the entry point of the rewritten stub.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
@@ -6410,14 +6681,13 @@
 }
 
 
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register receiver,
-    Register properties,
-    String* name,
-    Register scratch0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register receiver,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register scratch0) {
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
@@ -6475,14 +6745,12 @@
   __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ mov(r1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
+  __ CallStub(&stub);
   __ tst(r0, Operand(r0));
   __ ldm(ia_w, sp, spill_mask);
 
   __ b(eq, done);
   __ b(ne, miss);
-  return result;
 }
 
 
@@ -6497,6 +6765,11 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
+  ASSERT(!elements.is(scratch1));
+  ASSERT(!elements.is(scratch2));
+  ASSERT(!name.is(scratch1));
+  ASSERT(!name.is(scratch2));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6540,8 +6813,14 @@
       ~(scratch1.bit() | scratch2.bit());
 
   __ stm(db_w, sp, spill_mask);
-  __ Move(r0, elements);
-  __ Move(r1, name);
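+  // Order the moves so that a name already in r0 is not clobbered by
+  // loading elements into r0 first.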
+  if (name.is(r0)) {
+    ASSERT(!elements.is(r1));
+    __ Move(r1, name);
+    __ Move(r0, elements);
+  } else {
+    __ Move(r0, elements);
+    __ Move(r1, name);
+  }
   StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
   __ CallStub(&stub);
   __ tst(r0, Operand(r0));
@@ -6554,6 +6833,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  r1: key
@@ -6643,6 +6924,333 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { r6, r4, r7, EMIT_REMEMBERED_SET },
+  { r6, r2, r7, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+  // Also used in KeyedStoreIC::GenerateGeneric.
+  { r3, r4, r5, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { r4, r1, r2, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { r1, r2, r3, EMIT_REMEMBERED_SET },
+  { r3, r2, r1, EMIT_REMEMBERED_SET },
+  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { r2, r1, r3, EMIT_REMEMBERED_SET },
+  { r3, r1, r2, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { r4, r2, r3, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { r2, r3, r9, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { r6, r2, r0, EMIT_REMEMBERED_SET },
+  { r2, r6, r9, EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { r5, r0, r6, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET }
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_.  A pointer
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ b(&skip_to_incremental_noncompacting);
+  __ b(&skip_to_incremental_compacting);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
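
The (1 << 12) bound in the ASSERTs above exists because the patching helpers
in code-stubs-arm.h convert the very same instruction word between a branch
and a tst-immediate by toggling only bits 27, 24 and 20, so the branch offset
must stay clear of the bits the two encodings share.  A minimal C++ sketch of
that round trip (instruction encodings assumed from the ARM architecture
manual, not taken from this diff):

    #include <cstdint>
    #include <cassert>

    int main() {
      const uint32_t B20 = 1u << 20, B24 = 1u << 24, B27 = 1u << 27;
      uint32_t branch = 0xEA000004;  // "b" with a small imm24 (cond = AL)
      // PatchBranchIntoNop: clear the branch bit, set the tst-immediate bits.
      uint32_t tst_nop = (branch & ~B27) | (B24 | B20);  // 0xE3100004: a tst
      // PatchNopIntoBranch: the inverse.
      uint32_t back = (tst_nop & ~(B24 | B20)) | B27;
      assert(back == branch);  // lossless while bits 20 and 24 were clear
      return 0;
    }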
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(r0));
+  __ Move(address, regs_.address());
+  __ Move(r0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(r1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ ldr(r1, MemOperand(address, 0));
+  }
+  __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object: if it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : element value to store
+  //  -- r1    : array literal
+  //  -- r2    : map of array literal
+  //  -- r3    : element index as smi
+  //  -- r4    : array literal index in function as smi
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(r2, r5, &double_elements);
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(r0, &smi_element);
+  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+  __ bind(&slow_elements);
+  __ Push(r1, r3, r0);
+  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
+  __ Push(r5, r4);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
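+  // (r3 is a smi, i.e. index * 2; the shift rescales it to
+  //  index * kPointerSize.)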
+  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ str(r0, MemOperand(r6, 0));
+  // Update the write barrier for the array store.
+  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10,
+                                 &slow_elements);
+  __ Ret();
+}
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index cdea03e..38ed476 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,6 +58,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -117,7 +136,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -216,7 +235,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -387,6 +406,9 @@
         the_heap_number_(the_heap_number),
         scratch_(scratch) { }
 
+  bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+
  private:
   Register the_int_;
   Register the_heap_number_;
@@ -435,6 +457,218 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+  }
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   Assembler::kInstrSize);
+
+    if (Assembler::IsBranch(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+    if (Assembler::IsBranch(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(second_instruction));
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The inputs
+  // are two registers that must be preserved and one scratch register
+  // provided by the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(VFP3);
+        masm->sub(sp,
+                  sp,
+                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+        // Save all VFP registers except d0.
+        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+          DwVfpRegister reg = DwVfpRegister::from_code(i);
+          masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+        }
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(VFP3);
+        // Restore all VFP registers except d0.
+        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+          DwVfpRegister reg = DwVfpRegister::from_code(i);
+          masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+        }
+        masm->add(sp,
+                  sp,
+                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+      }
+      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+    }
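
Worked out, assuming kDoubleSize == 8 and DwVfpRegister::kNumRegisters == 16
(the d0-d15 VFP bank): the save path reserves (16 - 1) * 8 = 120 bytes and
stores each d<i> (i = 15 down to 1) at sp + (i - 1) * 8, so d1 ends up at the
stack top and d15 at sp + 112; the restore path reads the same slots back
before releasing the 120 bytes.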
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    Register GetRegThatIsNotOneOf(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
+
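A quick check of how the bit fields above pack, using one of the register
triples pregenerated in code-stubs-arm.cc (r6/r4/r7 carry register codes
6/4/7; enum values of 0 are assumed here for EMIT_REMEMBERED_SET and
kDontSaveFPRegs):

    #include <cstdint>
    #include <cassert>

    int main() {
      uint32_t object = 6, value = 4, address = 7;  // r6, r4, r7
      uint32_t action = 0, fp_mode = 0;             // assumed enum values
      uint32_t key = object | (value << 4) | (address << 8) |
                     (action << 12) | (fp_mode << 13);
      assert(key == 0x746);  // 14 bits identify the stub variant
      return 0;
    }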
+
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
@@ -622,14 +856,13 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register receiver,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -639,6 +872,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -651,7 +886,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index bf748a9..3371e8a 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -30,23 +30,368 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
+
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : target map, scratch for subsequent call
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  // Set transitioned map.
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : target map, scratch for subsequent call
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required;
+  bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
+  __ push(lr);
+
+  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  // r4: source FixedArray
+  // r5: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(lr, lr, Operand(r5, LSL, 2));
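+  // (r5 is a smi, i.e. length * 2, so LSL 2 rescales it to length * 8
+  //  bytes: one double per element on top of the header.)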
+  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  // r6: destination FixedDoubleArray, not tagged as heap object
+  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
+  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+  // Set destination FixedDoubleArray's length.
+  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+  // Update receiver's map.
+
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ add(r3, r6, Operand(kHeapObjectTag));
+  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ RecordWriteField(r2,
+                      JSObject::kElementsOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Prepare for conversion loop.
+  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(r6, r7, Operand(r5, LSL, 2));
+  __ mov(r4, Operand(kHoleNanLower32));
+  __ mov(r5, Operand(kHoleNanUpper32));
+  // r3: begin of source FixedArray element fields, not tagged
+  // r4: kHoleNanLower32
+  // r5: kHoleNanUpper32
+  // r6: end of destination FixedDoubleArray, not tagged
+  // r7: begin of FixedDoubleArray element fields, not tagged
+  if (!vfp3_supported) __ Push(r1, r0);
+
+  __ b(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(lr);
+  __ b(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ ldr(r9, MemOperand(r3, 4, PostIndex));
+  // r9: current element
+  __ JumpIfNotSmi(r9, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  __ SmiUntag(r9);
+  if (vfp3_supported) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, r9);
+    __ vcvt_f64_s32(d0, s0);
+    __ vstr(d0, r7, 0);
+    __ add(r7, r7, Operand(8));
+  } else {
+    FloatingPointHelper::ConvertIntToDouble(masm,
+                                            r9,
+                                            FloatingPointHelper::kCoreRegisters,
+                                            d0,
+                                            r0,
+                                            r1,
+                                            lr,
+                                            s0);
+    __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
+  }
+  __ b(&entry);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, "object found in smi-only array");
+  }
+  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+
+  __ bind(&entry);
+  __ cmp(r7, r6);
+  __ b(lt, &loop);
+
+  if (!vfp3_supported) __ Pop(r1, r0);
+  __ pop(lr);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : target map, scratch for subsequent call
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label entry, loop, convert_hole, gc_required;
+
+  __ push(lr);
+  __ Push(r3, r2, r1, r0);
+
+  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  // r4: source FixedDoubleArray
+  // r5: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(r0, r0, Operand(r5, LSL, 1));
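+  // (r5 is a smi, i.e. length * 2, so LSL 1 rescales it to length * 4
+  //  bytes: one pointer per element on top of the header.)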
+  __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  // r6: destination FixedArray, not tagged as heap object
+  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+  // Set destination FixedArray's length.
+  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+
+  // Prepare for conversion loop.
+  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
+  __ add(r6, r6, Operand(kHeapObjectTag));
+  __ add(r5, r3, Operand(r5, LSL, 1));
+  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses in r4 to fully take advantage of post-indexing.
+  // r3: begin of destination FixedArray element fields, not tagged
+  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
+  // r5: end of destination FixedArray, not tagged
+  // r6: destination FixedArray
+  // r7: the-hole pointer
+  // r9: heap number map
+  __ b(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ Pop(r3, r2, r1, r0);
+  __ pop(lr);
+  __ b(fail);
+
+  __ bind(&loop);
+  __ ldr(r1, MemOperand(r4, 8, PostIndex));
+  // r1: current element's upper 32 bits
+  // r4: address of next element's upper 32 bits
+  __ cmp(r1, Operand(kHoleNanUpper32));
+  __ b(eq, &convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
+  // r2: new heap number
+  __ ldr(r0, MemOperand(r4, 12, NegOffset));
+  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+  __ mov(r0, r3);
+  __ str(r2, MemOperand(r3, 4, PostIndex));
+  __ RecordWrite(r6,
+                 r0,
+                 r2,
+                 kLRHasBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ b(&entry);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ str(r7, MemOperand(r3, 4, PostIndex));
+
+  __ bind(&entry);
+  __ cmp(r3, r5);
+  __ b(lt, &loop);
+
+  __ Pop(r3, r2, r1, r0);
+  // Update receiver's map.
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ RecordWriteField(r2,
+                      JSObject::kElementsOffset,
+                      r6,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(lr);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ tst(result, Operand(kIsIndirectStringMask));
+  __ b(eq, &check_sequential);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ tst(result, Operand(kSlicedNotConsMask));
+  __ b(eq, &cons_string);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ add(index, index, Operand(result, ASR, kSmiTagSize));
+  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+  __ cmp(result, ip);
+  __ b(ne, call_runtime);
+  // Get the first of the two strings and load its instance type.
+  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result, Operand(kStringRepresentationMask));
+  __ b(ne, &external_string);
+
+  // Prepare sequential strings.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ add(string,
+         string,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ jmp(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ tst(result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found");
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ tst(result, Operand(kShortExternalStringMask));
+  __ b(ne, call_runtime);
+  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label ascii, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ tst(result, Operand(kStringEncodingMask));
+  __ b(ne, &ascii);
+  // Two-byte string.
+  __ ldrh(result, MemOperand(string, index, LSL, 1));
+  __ jmp(&done);
+  __ bind(&ascii);
+  // ASCII string.
+  __ ldrb(result, MemOperand(string, index));
+  __ bind(&done);
+}
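
Stripped of the bit tests, the dispatch above reduces to a small loop.  A toy
C++ model (the struct and its fields are invented for illustration; the real
code additionally dispatches on encoding and bails out to the runtime for
short external strings and non-flat cons strings):

    #include <cassert>
    #include <string>

    struct ToyString {
      enum Kind { SEQ, SLICE, CONS } kind;
      std::u16string data;          // SEQ only
      ToyString* first = nullptr;   // SLICE: parent, CONS: first part
      ToyString* second = nullptr;  // CONS only; must be empty to stay fast
      int offset = 0;               // SLICE only
    };

    char16_t CharAt(ToyString* s, int index) {
      while (s->kind != ToyString::SEQ) {
        if (s->kind == ToyString::SLICE) {
          index += s->offset;  // slices add their offset, then continue up
          s = s->first;
        } else {
          assert(s->second->data.empty());  // flat cons: second part empty
          s = s->first;
        }
      }
      return s->data[index];
    }

    int main() {
      ToyString base{ToyString::SEQ, u"hello world"};
      ToyString slice{ToyString::SLICE, u"", &base, nullptr, 6};
      assert(CharAt(&slice, 0) == u'w');  // offset 6 into "hello world"
      return 0;
    }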
+
+#undef __
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index d27982a..c340e6b 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -29,7 +29,6 @@
 #define V8_ARM_CODEGEN_ARM_H_
 
 #include "ast.h"
-#include "code-stubs-arm.h"
 #include "ic-inl.h"
 
 namespace v8 {
@@ -69,21 +68,26 @@
                               int pos,
                               bool right_here = false);
 
-  // Constants related to patching of inlined load/store.
-  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
-    return FLAG_debug_code ? 32 : 13;
-  }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
-  static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
-    return Isolate::Current()->inlined_write_barrier_size() + 4;
-  }
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 823c6ff..49b8db7 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -87,22 +87,21 @@
 namespace internal {
 
 // Constant pool marker.
-static const int kConstantPoolMarkerMask = 0xffe00000;
-static const int kConstantPoolMarker = 0x0c000000;
-static const int kConstantPoolLengthMask = 0x001ffff;
+const int kConstantPoolMarkerMask = 0xffe00000;
+const int kConstantPoolMarker = 0x0c000000;
+const int kConstantPoolLengthMask = 0x001ffff;
 
 // Number of registers in normal ARM mode.
-static const int kNumRegisters = 16;
+const int kNumRegisters = 16;
 
 // VFP support.
-static const int kNumVFPSingleRegisters = 32;
-static const int kNumVFPDoubleRegisters = 16;
-static const int kNumVFPRegisters =
-    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+const int kNumVFPSingleRegisters = 32;
+const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
 
 // PC is register 15.
-static const int kPCRegister = 15;
-static const int kNoRegister = -1;
+const int kPCRegister = 15;
+const int kNoRegister = -1;
 
 // -----------------------------------------------------------------------------
 // Conditions.
@@ -371,9 +370,9 @@
   // stop
   kStopCode = 1 << 23
 };
-static const uint32_t kStopCodeMask = kStopCode - 1;
-static const uint32_t kMaxStopCode = kStopCode - 1;
-static const int32_t  kDefaultStopCode = -1;
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t  kDefaultStopCode = -1;
 
 
 // Type of VFP register. Determines register encoding.
@@ -391,17 +390,17 @@
 
 // This mask does not include the "inexact" or "input denormal" cumulative
 // exception flags, because we usually don't want to check for them.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-static const uint32_t kVFPInexactExceptionBit = 1 << 4;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPExceptionMask = 0xf;
+const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+const uint32_t kVFPInexactExceptionBit = 1 << 4;
+const uint32_t kVFPFlushToZeroMask = 1 << 24;
 
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+const uint32_t kVFPNConditionFlagBit = 1 << 31;
+const uint32_t kVFPZConditionFlagBit = 1 << 30;
+const uint32_t kVFPCConditionFlagBit = 1 << 29;
+const uint32_t kVFPVConditionFlagBit = 1 << 28;
 
 
 // VFP rounding modes. See ARM DDI 0406B Page A2-29.
@@ -418,7 +417,7 @@
   kRoundToZero = RZ
 };
 
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
+const uint32_t kVFPRoundingModeMask = 3 << 22;
 
 enum CheckForInexactConversion {
   kCheckForInexactConversion,
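
Most of this file's churn drops a redundant keyword: in C++, a const object at
namespace scope already has internal linkage, so the static specifier adds
nothing. A minimal self-contained illustration:

// Both declarations have internal linkage; each translation unit that
// includes this header gets its own copy either way.
static const int kWithStatic    = 42;  // 'static' is redundant here
const int        kWithoutStatic = 42;  // identical linkage and semantics
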
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 07a2272..8374103 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -132,55 +132,57 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  if ((object_regs | non_object_regs) != 0) {
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ tst(reg, Operand(0xc0000000));
-          __ Assert(eq, "Unable to encode value as smi");
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, which the GC leaves untouched.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ tst(reg, Operand(0xc0000000));
+            __ Assert(eq, "Unable to encode value as smi");
+          }
+          __ mov(reg, Operand(reg, LSL, kSmiTagSize));
         }
-        __ mov(reg, Operand(reg, LSL, kSmiTagSize));
       }
+      __ stm(db_w, sp, object_regs | non_object_regs);
     }
-    __ stm(db_w, sp, object_regs | non_object_regs);
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
-  __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+    __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
+    __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  if ((object_regs | non_object_regs) != 0) {
-    __ ldm(ia_w, sp, object_regs | non_object_regs);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ mov(reg, Operand(reg, LSR, kSmiTagSize));
-      }
-      if (FLAG_debug_code &&
-          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-        __ mov(reg, Operand(kDebugZapValue));
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ ldm(ia_w, sp, object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+        }
+        if (FLAG_debug_code &&
+            (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
       }
     }
-  }
 
-  __ LeaveInternalFrame();
+    // Leave the internal frame.
+  }
 
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
@@ -265,11 +267,11 @@
 }
 
 
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- r1 : function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0);
+  Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
 }
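
The mechanical change in this file replaces the paired EnterInternalFrame/
LeaveInternalFrame calls with a block-scoped FrameScope, tying frame teardown
to scope exit. A self-contained sketch of the RAII pattern; the Emit* helpers
below are stand-ins, not V8's API:

#include <cstdio>

static void EmitEnterFrame() { std::puts("emit: enter internal frame"); }
static void EmitLeaveFrame() { std::puts("emit: leave internal frame"); }

// Constructing the scope emits the frame prologue, destroying it emits the
// epilogue, so the two can no longer get out of sync on any exit path.
class FrameScopeSketch {
 public:
  FrameScopeSketch() { EmitEnterFrame(); }
  ~FrameScopeSketch() { EmitLeaveFrame(); }
};

int main() {
  {
    FrameScopeSketch scope;   // prologue emitted here
    std::puts("emit: body");  // spill registers, call runtime, restore
  }                           // epilogue emitted automatically
  return 0;
}
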
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index d4f251f..4b54b6d 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -79,18 +79,24 @@
     ASSERT(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     ASSERT(call_address + patch_size() <= code->instruction_end());
-
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
   }
 
+  Isolate* isolate = code->GetIsolate();
+
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+  DeoptimizerData* data = isolate->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -102,7 +108,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
@@ -137,10 +144,14 @@
          reinterpret_cast<uint32_t>(check_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
@@ -161,6 +172,9 @@
          reinterpret_cast<uint32_t>(replacement_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 2 * kInstrSize, check_code);
 }
 
 
@@ -600,7 +614,10 @@
   __ mov(r5, Operand(ExternalReference::isolate_address()));
   __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
 
   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
@@ -654,8 +671,11 @@
   // r0: deoptimizer object; r1: scratch.
   __ PrepareCallCFunction(1, r1);
   // Call Deoptimizer::ComputeOutputFrames().
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
@@ -671,7 +691,6 @@
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
-  // __ add(r6, r2, Operand(r3, LSL, 1));
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
@@ -704,7 +723,10 @@
   __ pop(ip);  // remove sp
   __ pop(ip);  // remove lr
 
-  __ InitializeRootRegister();
+  // Set up the roots register.
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(isolate);
+  __ mov(r10, Operand(roots_array_start));
 
   __ pop(ip);  // remove pc
   __ pop(r7);  // get continuation, leave pc on stack
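
Both patch routines above now finish by telling the incremental marker about
the rewritten call target; without that notification, a marker that had
already visited the unoptimized code object could miss the newly written
pointer. A schematic of the patch-then-notify protocol; the types and names
here are illustrative stand-ins, not V8's API:

#include <cstdio>

// Stand-in for the collector's record-keeping.
struct MarkerSketch {
  void RecordCodeTargetPatch(const char* host, const char* new_target) {
    // Remember the new target so a concurrently running marker that has
    // already visited 'host' still reaches 'new_target'.
    std::printf("record patch: %s -> %s\n", host, new_target);
  }
};

void PatchCallSite(MarkerSketch* marker, const char* host,
                   const char* new_target) {
  std::printf("rewrite call in %s to %s\n", host, new_target);  // the patch
  marker->RecordCodeTargetPatch(host, new_target);  // then notify the GC
}

int main() {
  MarkerSketch marker;
  PatchCallSite(&marker, "unoptimized_code", "replacement_code");
  return 0;
}
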
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 26bbd82..1844149 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -35,22 +35,22 @@
 // The ARM ABI does not specify the usage of register r9, which may be reserved
 // as the static base or thread register on some platforms, in which case we
 // leave it alone. Adjust the value of kR9Available accordingly:
-static const int kR9Available = 1;  // 1 if available to us, 0 if reserved
+const int kR9Available = 1;  // 1 if available to us, 0 if reserved
 
 
 // Register list in load/store instructions
 // Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 16;
+const int kNumRegs = 16;
 
 
 // Caller-saved/arguments registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
   1 << 0 |  // r0 a1
   1 << 1 |  // r1 a2
   1 << 2 |  // r2 a3
   1 << 3;   // r3 a4
 
-static const int kNumJSCallerSaved = 4;
+const int kNumJSCallerSaved = 4;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
@@ -60,7 +60,7 @@
 
 
 // Callee-saved registers preserved when switching from C to JavaScript
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
   1 <<  4 |  //  r4 v1
   1 <<  5 |  //  r5 v2
   1 <<  6 |  //  r6 v3
@@ -70,36 +70,45 @@
   1 << 10 |  // r10 v7
   1 << 11;   // r11 v8 (fp in JavaScript code)
 
-static const int kNumCalleeSaved = 7 + kR9Available;
+// Caller-saved registers when calling into C++ (only for C++ calls that
+// can't cause a GC); the call code will take care of lr, fp, etc.
+const RegList kCallerSaved =
+  1 <<  0 |  // r0
+  1 <<  1 |  // r1
+  1 <<  2 |  // r2
+  1 <<  3 |  // r3
+  1 <<  9;   // r9
+
+
+const int kNumCalleeSaved = 7 + kR9Available;
 
 // Double registers d8 to d15 are callee-saved.
-static const int kNumDoubleCalleeSaved = 8;
+const int kNumDoubleCalleeSaved = 8;
 
 
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
 // TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
-    kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
 
 // ----------------------------------------------------
 
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kStateOffset   = 1 * kPointerSize;
-  static const int kContextOffset = 2 * kPointerSize;
-  static const int kFPOffset      = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
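
The handler layout above trades the separately stored pc for a code slot and
shifts the remaining fields down one word; the total size is unchanged at five
words (kSize = kFPOffset + kPointerSize). Restated as a plain struct for
readability; the field types are illustrative, since the real handler is raw
words pushed on the stack:

#include <cstdint>

struct StackHandlerSketch {
  uintptr_t next;     // kNextOffset    = 0 * kPointerSize
  uintptr_t code;     // kCodeOffset    = 1 * kPointerSize (the new slot)
  uintptr_t state;    // kStateOffset   = 2 * kPointerSize
  uintptr_t context;  // kContextOffset = 3 * kPointerSize
  uintptr_t fp;       // kFPOffset      = 4 * kPointerSize
};

static_assert(sizeof(StackHandlerSketch) == 5 * sizeof(uintptr_t),
              "handler occupies five words");
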
 
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 50ed8b1..fdd3266 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -39,6 +39,7 @@
 #include "stub-cache.h"
 
 #include "arm/code-stubs-arm.h"
+#include "arm/macro-assembler-arm.h"
 
 namespace v8 {
 namespace internal {
@@ -46,11 +47,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -131,6 +127,8 @@
   ASSERT(info_ == NULL);
   info_ = info;
   scope_ = info->scope();
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -145,7 +143,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
     __ cmp(r5, Operand(0));
     __ b(eq, &ok);
@@ -155,6 +153,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(lr, fp, cp, r1);
@@ -200,13 +203,12 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(var->index())));
-        __ str(r0, MemOperand(cp, r1));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(r2, Operand(cp));
-        __ RecordWrite(r2, Operand(r1), r3, r0);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ str(r0, target);
+
+        // Update the write barrier.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     }
   }
@@ -234,7 +236,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (is_strict_mode()) {
+    if (!is_classic_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -264,7 +266,10 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -391,7 +396,7 @@
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -414,7 +419,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -449,7 +454,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -508,7 +513,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -575,7 +580,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -665,17 +670,20 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ str(src, location);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWrite(scratch0,
-                   Operand(Context::SlotOffset(var->index())),
-                   scratch1,
-                   src);
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kLRHasBeenSaved,
+                              kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -686,13 +694,7 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r0, ip);
@@ -703,13 +705,15 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
+                                        VariableMode mode,
                                         FunctionLiteral* function,
                                         int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -721,7 +725,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ str(result_register(), StackOperand(variable));
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, StackOperand(variable));
@@ -746,10 +750,16 @@
         __ str(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ mov(r1, Operand(cp));
-        __ RecordWrite(r1, Operand(offset), r2, result_register());
+        __ RecordWriteContextSlot(cp,
+                                  offset,
+                                  result_register(),
+                                  r2,
+                                  kLRHasBeenSaved,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, ContextOperand(cp, variable->index()));
@@ -761,11 +771,13 @@
     case Variable::LOOKUP: {
       Comment cmnt(masm_, "[ Declaration");
       __ mov(r2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+          ? READ_ONLY : NONE;
       __ mov(r1, Operand(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -775,7 +787,7 @@
         __ Push(cp, r2, r1);
         // Push initial value for function declaration.
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
         __ Push(cp, r2, r1, r0);
       } else {
@@ -917,11 +929,17 @@
   __ bind(&done_convert);
   __ push(r0);
 
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+  __ b(le, &call_runtime);
+
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
+  Label next;
   // Preload a couple of values used in the loop.
   Register  empty_fixed_array_value = r6;
   __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -1000,9 +1018,16 @@
   __ jmp(&loop);
 
   // We got a fixed array in register r0. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ mov(r1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
-  __ Push(r1, r0);
+  __ mov(r1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
+  __ ldr(r2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
+  __ b(gt, &non_proxy);
+  __ mov(r1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ Push(r1, r0);  // Smi and array
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
@@ -1019,18 +1044,23 @@
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register r2.
   __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
   __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r4, Operand(r2));
   __ b(eq, &update_each);
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  __ cmp(r2, Operand(Smi::FromInt(0)));
+  __ b(eq, &update_each);
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1085,7 +1115,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ mov(r0, Operand(info));
     __ push(r0);
     __ CallStub(&stub);
@@ -1116,7 +1146,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
         __ tst(temp, temp);
@@ -1129,7 +1159,7 @@
     }
     // If no outer scope calls eval, we do not need to check more
     // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1173,7 +1203,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(temp, temp);
@@ -1205,15 +1235,24 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      if (local->mode() == CONST) {
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      } else {  // LET || CONST_HARMONY
+        __ b(ne, done);
+        __ mov(r0, Operand(var->name()));
+        __ push(r0);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ jmp(done);
   }
@@ -1246,24 +1285,64 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        GetVar(r0, var);
-        __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-        if (var->mode() == Variable::LET) {
-          Label done;
-          __ b(ne, &done);
-          __ mov(r0, Operand(var->name()));
-          __ push(r0);
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          __ bind(&done);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is
+        // physically located in the source after the variable's initializer.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
         } else {
-          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+          // Check that we always have valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        context()->Plug(r0);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          GetVar(r0, var);
+          __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            Label done;
+            __ b(ne, &done);
+            __ mov(r0, Operand(var->name()));
+            __ push(r0);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+            __ bind(&done);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+          }
+          context()->Plug(r0);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1337,10 +1416,11 @@
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ ldr(r3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(expr->constant_properties()));
+  __ mov(r1, Operand(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1349,10 +1429,15 @@
       : ObjectLiteral::kNoFlags;
   __ mov(r0, Operand(Smi::FromInt(flags)));
   __ Push(r3, r2, r1, r0);
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1386,9 +1471,9 @@
             VisitForAccumulatorValue(value);
             __ mov(r2, Operand(key->handle()));
             __ ldr(r1, MemOperand(sp));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
             __ Call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1447,13 +1532,20 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(expr->constant_elements()));
+  __ mov(r1, Operand(constant_elements));
   __ Push(r3, r2, r1);
-  if (expr->constant_elements()->map() ==
+  if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1465,8 +1557,13 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode = has_fast_elements
+      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
 
@@ -1489,15 +1586,23 @@
     }
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
-    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ str(result_register(), FieldMemOperand(r1, offset));
-
-    // Update the write barrier for the array store with r0 as the scratch
-    // register.
-    __ RecordWrite(r1, Operand(offset), r2, result_register());
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ ldr(r6, MemOperand(sp));  // Copy of array literal.
+      __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
+      __ str(result_register(), FieldMemOperand(r1, offset));
+      // Update the write barrier for the array store.
+      __ RecordWriteField(r1, offset, result_register(), r2,
+                          kLRHasBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+    } else {
+      __ ldr(r1, MemOperand(sp));  // Copy of array literal.
+      __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
+      __ mov(r3, Operand(Smi::FromInt(i)));
+      __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1629,7 +1734,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1637,7 +1742,7 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1785,9 +1890,9 @@
       __ mov(r1, r0);
       __ pop(r0);  // Restore value.
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ Call(ic);
       break;
     }
@@ -1798,9 +1903,9 @@
       __ mov(r1, r0);
       __ pop(r2);
       __ pop(r0);  // Restore value.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ Call(ic);
       break;
     }
@@ -1816,9 +1921,9 @@
     // Global var, const, or let.
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
@@ -1844,12 +1949,12 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(r0);  // Value.
       __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ mov(r0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, r1, r0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
@@ -1869,12 +1974,14 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWrite(r1, Operand(offset), r2, r3);
+        __ RecordWriteContextSlot(
+            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, r1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1887,13 +1994,15 @@
       __ str(r0, location);
       if (var->IsContextSlot()) {
         __ mov(r3, r0);
-        __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(
+            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(r0);  // Value.
       __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ mov(r0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, r1, r0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
@@ -1930,9 +2039,9 @@
     __ pop(r1);
   }
 
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->StoreIC_Initialize()
+      : isolate()->builtins()->StoreIC_Initialize_Strict();
   __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1976,9 +2085,9 @@
     __ pop(r2);
   }
 
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
   __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -2083,6 +2192,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
+  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2091,8 +2201,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2101,22 +2210,20 @@
   }
   __ push(r1);
 
-  // Push the receiver of the enclosing function and do runtime call.
+  // Push the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(r1);
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ mov(r1, Operand(Smi::FromInt(strict_mode)));
+  // Push the language mode.
+  __ mov(r1, Operand(Smi::FromInt(language_mode())));
   __ push(r1);
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Push the start position of the scope the call resides in.
+  __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
+  __ push(r1);
+
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2150,28 +2257,11 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(r0);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(r1);
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.
@@ -2182,6 +2272,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2295,7 +2386,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2307,7 +2399,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2315,7 +2407,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2327,7 +2420,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2335,7 +2428,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2360,14 +2454,15 @@
   __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ b(lt, if_false);
   __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2381,14 +2476,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2404,7 +2500,7 @@
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2412,8 +2508,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2492,12 +2588,13 @@
   __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2511,14 +2608,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2532,14 +2630,15 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2553,7 +2652,7 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2561,8 +2660,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2585,14 +2684,15 @@
   __ bind(&check_frame_marker);
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2608,14 +2708,15 @@
 
   __ pop(r1);
   __ cmp(r0, r1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in r1 and the formal
@@ -2629,9 +2730,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2651,7 +2751,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2662,20 +2763,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
   // Map is now in r0.
   __ b(lt, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ b(eq, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
-  __ b(ge, &function);
+  __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ b(eq, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_constructor);
@@ -2707,7 +2812,7 @@
 }
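
The STATIC_ASSERTs above encode a small range trick: with exactly two callable instance types, one at each end of the spec-object range, classification needs only two equality checks instead of a range test per callable type. A minimal plain-C++ sketch of the same invariant (the numeric values are made up; in V8 the two callable types are JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE):

    enum InstanceType {
      FIRST_SPEC_OBJECT_TYPE = 0xa0,            // callable (made-up value)
      FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = 0xa1,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 0xad,
      LAST_SPEC_OBJECT_TYPE = 0xae              // callable (made-up value)
    };

    static_assert(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == FIRST_SPEC_OBJECT_TYPE + 1,
                  "one callable type at the low end");
    static_assert(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_SPEC_OBJECT_TYPE - 1,
                  "one callable type at the high end");

    // Two comparisons decide callability for the whole range.
    inline bool IsCallableSpecObject(InstanceType t) {
      return t == FIRST_SPEC_OBJECT_TYPE || t == LAST_SPEC_OBJECT_TYPE;
    }
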
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2715,6 +2820,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2728,9 +2834,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2750,7 +2855,8 @@
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
   if (CpuFeatures::IsSupported(VFP3)) {
     __ PrepareCallCFunction(1, r0);
-    __ mov(r0, Operand(ExternalReference::isolate_address()));
+    __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
     __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
     CpuFeatures::Scope scope(VFP3);
@@ -2770,8 +2876,9 @@
     __ mov(r0, r4);
   } else {
     __ PrepareCallCFunction(2, r0);
+    __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
     __ mov(r0, Operand(r4));
-    __ mov(r1, Operand(ExternalReference::isolate_address()));
+    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
     __ CallCFunction(
         ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
@@ -2780,9 +2887,10 @@
 }
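
The bit pattern the comment above describes can be reproduced in portable C++. A stand-alone sketch (not V8 code, and assuming IEEE-754 doubles) of why subtracting 2^20 leaves a value uniform in [0, 1):

    #include <cstdint>
    #include <cstring>

    // High word 0x41300000 encodes sign 0, biased exponent 0x413 (i.e. 2^20)
    // and 20 zero mantissa bits; the low word supplies 32 random mantissa bits.
    double RandomBitsToDouble(uint32_t random32) {
      uint64_t bits = (uint64_t{0x41300000} << 32) | random32;
      double d;
      std::memcpy(&d, &bits, sizeof d);  // type-pun via memcpy, not a cast
      return d - 1048576.0;              // 2^20*(1 + r/2^52) - 2^20 == r / 2^32
    }
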
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2792,9 +2900,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2805,9 +2914,9 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
-
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
@@ -2823,8 +2932,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2834,9 +2944,9 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(r1);  // r0 = value. r1 = object.
@@ -2853,16 +2963,18 @@
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+  __ mov(r2, r0);
+  __ RecordWriteField(
+      r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
-
   // Load the argument on the stack and call the stub.
   VisitForStackValue(args->at(0));
 
@@ -2872,9 +2984,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
-
   VisitForAccumulatorValue(args->at(0));
 
   Label done;
@@ -2890,15 +3002,14 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
-  Register scratch = r2;
   Register result = r3;
 
   __ pop(object);
@@ -2908,7 +3019,6 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2937,16 +3047,15 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
-
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
-  Register scratch1 = r2;
-  Register scratch2 = r3;
+  Register scratch = r3;
   Register result = r0;
 
   __ pop(object);
@@ -2956,8 +3065,7 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2986,9 +3094,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -2998,9 +3106,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3010,10 +3118,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3021,10 +3130,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3032,10 +3142,23 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3043,8 +3166,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3052,7 +3176,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3061,18 +3186,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
+  __ b(eq, &proxy);
+
   // InvokeFunction requires the function in r1. Move it in there.
   __ mov(r1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(r1, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(r0);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(r0);
 }
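
The shape of the proxy check added above, reduced to plain C++ (the kinds and helpers are illustrative stand-ins, not V8's API): function proxies cannot go through the ordinary invoke path, so they divert to a runtime call.

    enum class CalleeKind { kJSFunction, kFunctionProxy };

    inline int InvokeDirectly() { return 0; }  // stand-in for InvokeFunction
    inline int CallViaRuntime() { return 1; }  // stand-in for Runtime::kCall

    int CallCallable(CalleeKind kind) {
      if (kind == CalleeKind::kFunctionProxy) {
        return CallViaRuntime();   // slow path for proxies
      }
      return InvokeDirectly();     // fast path for ordinary functions
    }
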
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3082,7 +3220,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3141,16 +3280,31 @@
   __ str(scratch1, MemOperand(index2, 0));
   __ str(scratch2, MemOperand(index1, 0));
 
-  Label new_space;
-  __ InNewSpace(elements, scratch1, eq, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   scratch1,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   ne,
+                   &no_remembered_set);
   // Possible optimization: do a check that both values are Smis
   // (OR them together and test against the Smi mask).
 
-  __ mov(scratch1, elements);
-  __ RecordWriteHelper(elements, index1, scratch2);
-  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index1,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index2,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3164,9 +3318,9 @@
 }
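
A toy model of the page-flag check and RememberedSetHelper calls above (layout and granularity are invented, not V8's real memory layout): pages already flagged SCAN_ON_SCAVENGE are scanned wholesale at the next scavenge, so individual slots on them need no recording.

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    constexpr uint32_t kScanOnScavenge = 1u << 0;  // stand-in for the page flag

    struct Page {
      uint32_t flags = 0;
      std::bitset<1024> remembered_slots;  // toy remembered set, one bit per slot
    };

    void RecordStore(Page* page, std::size_t slot) {
      if (page->flags & kScanOnScavenge) return;  // whole page scanned anyway
      page->remembered_slots.set(slot);           // like RememberedSetHelper
    }
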
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
 
@@ -3215,7 +3369,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = r0;
@@ -3255,7 +3410,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3267,14 +3423,15 @@
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3289,12 +3446,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3571,7 +3728,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
         __ push(r1);
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(r0);
@@ -3579,7 +3738,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ ldr(r2, GlobalObjectOperand());
           __ mov(r1, Operand(var->name()));
@@ -3622,18 +3781,35 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(r0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(r0);
+        __ bind(&done);
       }
       break;
     }
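
The label swap above is the whole implementation of NOT in a test context. Modelling jump targets as callbacks makes the idea concrete (a sketch, not the codegen API):

    #include <functional>

    using Target = std::function<void()>;

    // Compiling "e" for control: jump to one of two targets.
    void VisitForControl(bool e, const Target& if_true, const Target& if_false) {
      e ? if_true() : if_false();
    }

    // Compiling "!e" for control: same visit, targets exchanged.
    void VisitNotForControl(bool e, const Target& if_true, const Target& if_false) {
      VisitForControl(e, /*if_true=*/if_false, /*if_false=*/if_true);
    }
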
@@ -3826,9 +4002,9 @@
     case NAMED_PROPERTY: {
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       __ pop(r1);
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3843,9 +4019,9 @@
     case KEYED_PROPERTY: {
       __ pop(r1);  // Key.
       __ pop(r2);  // Receiver.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3892,20 +4068,25 @@
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(r0, if_true);
@@ -3942,9 +4123,11 @@
 
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(r0, if_false);
-    __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-    Split(ge, if_true, if_false, fall_through);
-
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
+    __ b(eq, if_true);
+    __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+    Split(eq, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(r0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3963,18 +4146,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-  Split(eq, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
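
Restated as a plain switch (tags are illustrative, not V8's type tags), the dispatch above implements typeof; "function" must accept both callable types, which is exactly what the JS_FUNCTION_TYPE / JS_FUNCTION_PROXY_TYPE pair of compares achieves.

    #include <string>

    enum class Tag { kSmi, kHeapNumber, kString, kUndefined, kBoolean,
                     kFunction, kFunctionProxy, kOtherObject };

    std::string TypeofResult(Tag tag) {
      switch (tag) {
        case Tag::kSmi:
        case Tag::kHeapNumber:    return "number";
        case Tag::kString:        return "string";
        case Tag::kUndefined:     return "undefined";
        case Tag::kBoolean:       return "boolean";
        case Tag::kFunction:
        case Tag::kFunctionProxy: return "function";  // both callable types
        default:                  return "object";
      }
    }
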
 
 
@@ -3982,9 +4154,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -3992,20 +4167,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
       Split(eq, if_true, if_false, fall_through);
@@ -4015,7 +4183,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
       __ tst(r0, r0);
       Split(eq, if_true, if_false, fall_through);
@@ -4029,33 +4197,25 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cond = eq;
-          __ pop(r1);
           break;
         case Token::LT:
           cond = lt;
-          __ pop(r1);
           break;
         case Token::GT:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cond = lt;
-          __ mov(r1, result_register());
-          __ pop(r0);
+          cond = gt;
           break;
         case Token::LTE:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cond = ge;
-          __ mov(r1, result_register());
-          __ pop(r0);
+          cond = le;
           break;
         case Token::GTE:
           cond = ge;
-          __ pop(r1);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
+      __ pop(r1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4073,7 +4233,7 @@
       Handle<Code> ic = CompareIC::GetUninitialized(op);
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
     }
@@ -4085,8 +4245,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4094,15 +4255,21 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ LoadRoot(r1, Heap::kNullValueRootIndex);
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(r1, nil_value);
   __ cmp(r0, r1);
-  if (expr->is_strict()) {
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ b(eq, if_true);
-    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(r1, other_nil_value);
     __ cmp(r0, r1);
     __ b(eq, if_true);
     __ JumpIfSmi(r0, if_false);
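
The null/undefined logic of EmitLiteralCompareNil in a few lines of C++ (values are a toy tag, and nil is assumed to be one of the two nil tags; the real code additionally treats undetectable objects as == null, which this sketch omits): strict equality matches only the requested nil value, while non-strict equality also accepts the other one.

    enum class Value { kNull, kUndefined, kOtherObject };

    bool CompareNil(Value v, Value nil, bool strict) {
      if (v == nil) return true;   // matches the requested nil value
      if (strict) return false;    // === accepts nothing else
      // == also accepts the other nil value.
      return v == (nil == Value::kNull ? Value::kUndefined : Value::kNull);
    }
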
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2e49cae..f8e4bbb 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -208,7 +208,8 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1);
+  __ RecordWrite(
+      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
 }
 
 
@@ -381,10 +382,10 @@
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- r1    : receiver
   //  -- r2    : name
@@ -394,7 +395,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
@@ -463,7 +464,7 @@
 }
 
 
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -485,10 +486,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -504,21 +505,22 @@
   // Get the receiver of the function from the stack.
   __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ Push(r3, r2);
+    // Push the receiver and the name of the function.
+    __ Push(r3, r2);
 
-  // Call the entry.
-  __ mov(r0, Operand(2));
-  __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+    // Call the entry.
+    __ mov(r0, Operand(2));
+    __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+    CEntryStub stub(1);
+    __ CallStub(&stub);
 
-  // Move result to r1 and leave the internal frame.
-  __ mov(r1, Operand(r0));
-  __ LeaveInternalFrame();
+    // Move result to r1 and leave the internal frame.
+    __ mov(r1, Operand(r0));
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -539,7 +541,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -551,18 +553,6 @@
 }
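
The EnterInternalFrame/LeaveInternalFrame pairs replaced above follow the usual manual-bracket-to-RAII migration: the frame teardown is emitted by a destructor, so no exit path can leak a frame. A plain C++ stand-in (not V8's FrameScope) showing the shape:

    #include <cstdio>

    class FrameScope {                   // stand-in for V8's FrameScope
     public:
      FrameScope()  { std::puts("emit frame setup"); }
      ~FrameScope() { std::puts("emit frame teardown"); }
    };

    void GenerateMiss() {
      {
        FrameScope scope;                // was: __ EnterInternalFrame();
        std::puts("push receiver/name, call entry, move result");
      }                                  // was: __ LeaveInternalFrame();
      std::puts("frame is gone on every path out of the block");
    }
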
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
                                  Code::ExtraICState extra_ic_state) {
@@ -578,27 +568,6 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -650,12 +619,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
-  __ EnterInternalFrame();
-  __ push(r2);  // save the key
-  __ Push(r1, r2);  // pass the receiver and the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(r2);  // restore the key
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r2);  // save the key
+    __ Push(r1, r2);  // pass the receiver and the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(r2);  // restore the key
+  }
   __ mov(r1, r0);
   __ jmp(&do_call);
 
@@ -715,7 +685,7 @@
   __ JumpIfSmi(r2, &miss);
   __ IsObjectJSStringType(r2, r0, &miss);
 
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
@@ -908,7 +878,8 @@
       GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
   __ str(r0, mapped_location);
   __ add(r6, r3, r5);
-  __ RecordWrite(r3, r6, r9);
+  __ mov(r9, r0);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in r3.
@@ -916,7 +887,8 @@
       GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
   __ str(r0, unmapped_location);
   __ add(r6, r3, r4);
-  __ RecordWrite(r3, r6, r9);
+  __ mov(r9, r0);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -1137,14 +1109,12 @@
 
   Register receiver = r1;
   Register index = r0;
-  Register scratch1 = r2;
-  Register scratch2 = r3;
+  Register scratch = r3;
   Register result = r0;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1239,6 +1209,47 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r2     : receiver
+  //  -- r3     : target map
+  //  -- lr     : return address
+  // -----------------------------------
+  // Must return the modified receiver in r0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ mov(r0, r2);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ push(r2);
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r2     : receiver
+  //  -- r3     : target map
+  //  -- lr     : return address
+  // -----------------------------------
+  // Must return the modified receiver in r0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ mov(r0, r2);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ push(r2);
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
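
What GenerateSmiOnlyToDouble has to accomplish, modelled on plain data (a toy, not the generated code): rewrite the backing store in the wider representation, then install the new elements kind, which in V8 means storing the target map.

    #include <cstdint>
    #include <vector>

    enum class ElementsKind { kSmiOnly, kDouble };

    struct ToyArray {
      ElementsKind kind = ElementsKind::kSmiOnly;
      std::vector<int32_t> smis;     // backing store while kSmiOnly
      std::vector<double> doubles;   // backing store after the transition
    };

    void TransitionSmiToDouble(ToyArray* a) {
      a->doubles.assign(a->smis.begin(), a->smis.end());  // widen each element
      a->smis.clear();
      a->kind = ElementsKind::kDouble;                    // like storing the map
    }
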
+
+
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                               StrictModeFlag strict_mode) {
   // ---------- S t a t e --------------
@@ -1267,13 +1278,17 @@
   //  -- r2     : receiver
   //  -- lr     : return address
   // -----------------------------------
-  Label slow, fast, array, extra;
+  Label slow, array, extra, check_if_double_array;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
 
   // Register usage.
   Register value = r0;
   Register key = r1;
   Register receiver = r2;
   Register elements = r3;  // Elements array of the receiver.
+  Register elements_map = r6;
+  Register receiver_map = r7;
   // r4 and r5 are used as general scratch registers.
 
   // Check that the key is a smi.
@@ -1281,35 +1296,26 @@
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
-  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
   __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &slow);
   // Check if the object is a JS array or not.
-  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
   __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
   // Check that the object is some kind of JSObject.
-  __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, &slow);
-  __ cmp(r4, Operand(JS_PROXY_TYPE));
-  __ b(eq, &slow);
-  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
-  __ b(eq, &slow);
 
   // Object case: Check key against length in the elements array.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(ne, &slow);
   // Check array bounds. Both the key and the length of FixedArray are smis.
   __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(ip));
-  __ b(lo, &fast);
+  __ b(lo, &fast_object_with_map_check);
 
   // Slow case: handle the jump to the runtime.
   __ bind(&slow);
@@ -1330,21 +1336,31 @@
   __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(ip));
   __ b(hs, &slow);
+  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ b(ne, &check_if_double_array);
   // Calculate key + 1 as smi.
   STATIC_ASSERT(kSmiTag == 0);
   __ add(r4, key, Operand(Smi::FromInt(1)));
   __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ b(&fast);
+  __ b(&fast_object_without_map_check);
+
+  __ bind(&check_if_double_array);
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ b(ne, &slow);
+  // Add 1 to key, and go to common element store code for doubles.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ add(r4, key, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
   // is, the length is always a smi.
   __ bind(&array);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(ne, &slow);
 
   // Check the key against the length in the array.
   __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1352,18 +1368,57 @@
   __ b(hs, &extra);
   // Fall through to fast case.
 
-  __ bind(&fast);
-  // Fast case, store the value to the elements backing store.
-  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(r5));
-  // Skip write barrier if the written value is a smi.
-  __ tst(value, Operand(kSmiTagMask));
-  __ Ret(eq);
-  // Update write barrier for the elements array address.
-  __ sub(r4, r5, Operand(elements));
-  __ RecordWrite(elements, Operand(r4), r5, r6);
+  __ bind(&fast_object_with_map_check);
+  Register scratch_value = r4;
+  Register address = r5;
+  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ b(ne, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  __ Ret();
 
+  __ bind(&non_smi_value);
+  // Escape to slow case when writing non-smi into smi-only array.
+  __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+  // Fast elements array, store the value to the elements backing store.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 scratch_value,
+                 kLRHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ b(ne, &slow);
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value,
+                                 key,
+                                 receiver,
+                                 elements,
+                                 r4,
+                                 r5,
+                                 r6,
+                                 r7,
+                                 &slow);
   __ Ret();
 }
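
The triage performed by the fast paths above, flattened into one function (conditions simplified; "runtime" stands for the slow case, which may transition the elements kind):

    enum class StorePath { kSmiStore, kObjectStoreWithBarrier, kDoubleStore, kRuntime };

    StorePath ClassifyKeyedStore(bool value_is_smi,
                                 bool backing_is_fixed_array,
                                 bool elements_are_smi_only,
                                 bool backing_is_double_array) {
      if (backing_is_fixed_array) {
        if (value_is_smi) return StorePath::kSmiStore;  // no write barrier needed
        if (elements_are_smi_only) return StorePath::kRuntime;  // escape to slow case
        return StorePath::kObjectStoreWithBarrier;      // RecordWrite afterwards
      }
      if (backing_is_double_array) return StorePath::kDoubleStore;
      return StorePath::kRuntime;
    }
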
 
@@ -1510,11 +1565,9 @@
     case Token::LT:
       return lt;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return lt;
+      return gt;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return ge;
+      return le;
     case Token::GTE:
       return ge;
     default:
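
The mapping after this change is the direct one: GT and LTE were previously compiled as lt/ge with the operands swapped so that the ECMA-262 conversion order was preserved, and the compare code now presumably takes its operands in source order, making the reversal unnecessary. A sketch of the new mapping (the enums stand in for Token::Value and ARM condition codes):

    enum class Op { kEq, kEqStrict, kLt, kGt, kLte, kGte };
    enum Condition { eq, lt, gt, le, ge };

    Condition ComputeCondition(Op op) {
      switch (op) {
        case Op::kEq:
        case Op::kEqStrict: return eq;
        case Op::kLt:       return lt;
        case Op::kGt:       return gt;  // was: lt with reversed operands
        case Op::kLte:      return le;  // was: ge with reversed operands
        case Op::kGte:      return ge;
      }
      return eq;  // unreachable for well-formed ops
    }
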
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 30ccd05..2341774 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -212,10 +212,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -227,6 +228,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -241,6 +249,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -390,6 +406,12 @@
 }
 
 
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 LChunk::LChunk(CompilationInfo* info, HGraph* graph)
     : spill_slot_count_(0),
       info_(info),
@@ -711,7 +733,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -741,7 +765,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -753,7 +777,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -811,28 +836,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineAsRegister(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), r1);
-    LOperand* right = UseFixed(instr->right(), r0);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, r0), instr);
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsTagged()) {
@@ -994,10 +997,13 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
@@ -1007,7 +1013,6 @@
                                           argument_count_,
                                           value_count,
                                           outer);
-  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1016,7 +1021,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
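
The accumulator threaded through CreateEnvironment above exists because environments chain through inlined frames and the recursion numbers the outer frame first; the old per-call local counter restarted at zero in every frame. A minimal stand-in for the recursion:

    #include <vector>

    struct Env {
      Env* outer;
      int pushed_argument_count;  // values living in the argument area
    };

    void NumberArguments(Env* env, int* acc, std::vector<int>* indices) {
      if (env == nullptr) return;
      NumberArguments(env->outer, acc, indices);  // outer frame numbered first
      for (int i = 0; i < env->pushed_argument_count; ++i) {
        indices->push_back((*acc)++);             // like new LArgument(...)
      }
    }
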
@@ -1206,8 +1211,9 @@
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallFunction(function), r0), instr);
 }
 
 
@@ -1232,8 +1238,24 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
 
 
@@ -1244,16 +1266,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1399,12 +1411,10 @@
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  bool reversed = (op == Token::GT || op == Token::LTE);
-  LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
-  LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
   LCmpT* result = new LCmpT(left, right);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
@@ -1416,8 +1426,8 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
     return new LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
@@ -1444,9 +1454,9 @@
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
@@ -1457,6 +1467,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   return new LIsSmiAndBranch(Use(instr->value()));
@@ -1471,6 +1488,17 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
@@ -1734,7 +1762,7 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
   LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1748,14 +1776,11 @@
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  if (instr->check_hole_value()) {
-    LOperand* temp = TempRegister();
-    LOperand* value = UseRegister(instr->value());
-    return AssignEnvironment(new LStoreGlobalCell(value, temp));
-  } else {
-    LOperand* value = UseRegisterAtStart(instr->value());
-    return new LStoreGlobalCell(value, NULL);
-  }
+  LOperand* temp = TempRegister();
+  LOperand* value = UseTempRegister(instr->value());
+  LInstruction* result = new LStoreGlobalCell(value, temp);
+  if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+  return result;
 }
 
 
@@ -1968,6 +1993,26 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, NULL);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), r0);
+    LOperand* fixed_object_reg = FixedTemp(r2);
+    LOperand* new_map_reg = FixedTemp(r3);
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
@@ -2025,8 +2070,14 @@
 }
 
 
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+    HObjectLiteralGeneric* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr);
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 8c18760..6051ad9 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -107,10 +107,12 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -132,7 +134,8 @@
   V(NumberTagD)                                 \
   V(NumberTagI)                                 \
   V(NumberUntagD)                               \
-  V(ObjectLiteral)                              \
+  V(ObjectLiteralFast)                          \
+  V(ObjectLiteralGeneric)                       \
   V(OsrEntry)                                   \
   V(OuterContext)                               \
   V(Parameter)                                  \
@@ -162,6 +165,7 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
@@ -627,16 +631,17 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
  public:
-  explicit LIsNullAndBranch(LOperand* value) {
+  explicit LIsNilAndBranch(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -656,6 +661,20 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -684,6 +703,23 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -794,18 +830,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -1226,7 +1259,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1259,7 +1292,6 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
-  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1276,7 +1308,9 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1379,12 +1413,17 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
  public:
+  explicit LCallFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
-  int arity() const { return hydrogen()->argument_count() - 2; }
+  LOperand* function() { return inputs_[0]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
@@ -1560,7 +1599,6 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
-  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1580,7 +1618,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1642,7 +1680,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1668,6 +1706,30 @@
 };
 
 
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
 class LStringAdd: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringAdd(LOperand* left, LOperand* right) {
@@ -1838,10 +1900,17 @@
 };
 
 
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
 };
 
 
@@ -2159,12 +2228,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 4a201ab..0a4a691 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -67,6 +67,14 @@
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -135,7 +143,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
-  if (info_->is_strict_mode() || info_->is_native()) {
+  if (!info_->is_classic_mode() || info_->is_native()) {
     Label ok;
     __ cmp(r5, Operand(0));
     __ b(eq, &ok);
@@ -190,13 +198,11 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(var->index())));
-        __ str(r0, MemOperand(cp, r1));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(r2, Operand(cp));
-        __ RecordWrite(r2, Operand(r1), r3, r0);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ str(r0, target);
+        // Update the write barrier. This clobbers r3 and r0.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -238,6 +244,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -370,6 +379,12 @@
 }
 
 
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
 Operand LCodeGen::ToOperand(LOperand* op) {
   if (op->IsConstantOperand()) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
@@ -699,7 +714,7 @@
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(expected_safepoint_kind_ == kind);
 
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
@@ -986,6 +1001,7 @@
     virtual void Generate() {
       codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LDivI* instr_;
   };
@@ -1649,30 +1665,44 @@
 }
 
 
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  __ cmp(ToRegister(left), ToRegister(right));
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Condition cond = TokenToCondition(instr->op(), false);
 
-  if (instr->is_double()) {
-    // Compare left and right as doubles and load the
-    // resulting flags into the normal status register.
-    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
-    // If a NaN is involved, i.e. the result is unordered (V set),
-    // jump to false block label.
-    __ b(vs, chunk_->GetAssemblyLabel(false_block));
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+        EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                         : false_block;
+    EmitGoto(next_block);
   } else {
-    EmitCmpI(left, right);
+    if (instr->is_double()) {
+      // Compare left and right operands as doubles and load the
+      // resulting flags into the normal status register.
+      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+      // If a NaN is involved, i.e. the result is unordered (V set),
+      // jump to false block label.
+      __ b(vs, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      if (right->IsConstantOperand()) {
+        __ cmp(ToRegister(left),
+               Operand(ToInteger32(LConstantOperand::cast(right))));
+      } else if (left->IsConstantOperand()) {
+        __ cmp(ToRegister(right),
+               Operand(ToInteger32(LConstantOperand::cast(left))));
+        // We transposed the operands. Reverse the condition.
+        cond = ReverseCondition(cond);
+      } else {
+        __ cmp(ToRegister(left), ToRegister(right));
+      }
+    }
+    EmitBranch(true_block, false_block, cond);
   }
-
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  EmitBranch(true_block, false_block, cc);
 }
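
Note on the constant-operand handling above: when only the left operand is a
constant, the cmp is transposed and the condition reversed. A minimal
standalone C++ sketch (condition names are stand-ins for the ARM codes) of
why transposition pairs with reversal rather than negation:

    enum Condition { eq, ne, lt, gt, le, ge };

    // "k < x" emitted as cmp(x, k) must branch on gt, not ge: reversing
    // swaps the operand roles, whereas negating would flip the truth value.
    Condition ReverseCondition(Condition cond) {
      switch (cond) {
        case lt: return gt;
        case gt: return lt;
        case le: return ge;
        case ge: return le;
        default: return cond;  // eq and ne are symmetric under transposition
      }
    }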
 
 
@@ -1697,25 +1727,35 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register scratch = scratch0();
   Register reg = ToRegister(instr->InputAt(0));
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(ip, nil_value);
   __ cmp(reg, ip);
-  if (instr->is_strict()) {
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, eq);
   } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ b(eq, true_label);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(ip, other_nil_value);
     __ cmp(reg, ip);
     __ b(eq, true_label);
     __ JumpIfSmi(reg, false_label);
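
For reference, a standalone sketch of the comparison semantics
DoIsNilAndBranch encodes; the root accessors and predicates below are
hypothetical stand-ins, not V8 API:

    enum EqualityKind { kStrictEquality, kNonStrictEquality };
    enum NilValue { kNullValue, kUndefinedValue };
    struct Object;
    const Object* NullRoot();              // hypothetical root accessors
    const Object* UndefinedRoot();
    bool IsSmi(const Object* v);           // hypothetical predicates
    bool IsUndetectable(const Object* v);

    bool IsNil(const Object* value, EqualityKind kind, NilValue nil) {
      const Object* primary =
          (nil == kNullValue) ? NullRoot() : UndefinedRoot();
      if (value == primary) return true;
      if (kind == kStrictEquality) return false;  // '===' admits one value
      // '==' conflates null and undefined, and matches undetectable objects.
      const Object* other =
          (nil == kNullValue) ? UndefinedRoot() : NullRoot();
      if (value == other) return true;
      if (IsSmi(value)) return false;
      return IsUndetectable(value);
    }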
@@ -1772,6 +1812,31 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsString(reg, temp1, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1797,6 +1862,41 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
+
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1872,28 +1972,36 @@
   ASSERT(!input.is(temp));
   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
   __ JumpIfSmi(input, is_false);
-  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
-  __ b(lt, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ b(ge, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+    __ b(lt, is_false);
+    __ b(eq, is_true);
+    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+    __ b(eq, is_true);
   } else {
-    __ b(ge, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ b(gt, is_false);
   }
 
+  // The type is now known to be in the non-callable spec-object range.
   // Check if the constructor in the map is a function.
   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
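
The subtract-and-compare in the non-"Function" path above is a single-branch
range check. A standalone sketch of the idiom in its unsigned form (the code
above uses a signed variant and relies on the instance-type layout; the
bounds here are illustrative):

    #include <cstdint>

    constexpr uint32_t kFirst = 0xa9;  // stand-ins for FIRST/LAST_
    constexpr uint32_t kLast  = 0xad;  // NONCALLABLE_SPEC_OBJECT_TYPE

    // After subtracting the lower bound, one unsigned compare against the
    // range width tests both bounds: values below kFirst wrap around to
    // large unsigned numbers and fail the same test as values above kLast.
    bool InNonCallableRange(uint32_t instance_type) {
      return instance_type - kFirst <= kLast - kFirst;
    }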
@@ -1970,9 +2078,8 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -2002,7 +2109,10 @@
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch with
   // the cached map.
-  __ mov(ip, Operand(factory()->the_hole_value()));
+  Handle<JSGlobalPropertyCell> cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ mov(ip, Operand(Handle<Object>(cell)));
+  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
   __ cmp(map, Operand(ip));
   __ b(ne, &cache_miss);
   // We use Factory::the_hole_value() on purpose instead of loading from the
@@ -2078,26 +2188,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -2106,9 +2196,6 @@
   __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
 
   Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
   __ LoadRoot(ToRegister(instr->result()),
               Heap::kTrueValueRootIndex,
               condition);
@@ -2137,7 +2224,7 @@
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(result, ip);
     DeoptimizeIf(eq, instr->environment());
@@ -2160,6 +2247,7 @@
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
 
   // Load the cell.
   __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@@ -2168,8 +2256,7 @@
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted.
-  if (instr->hydrogen()->check_hole_value()) {
-    Register scratch2 = ToRegister(instr->TempAt(0));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ ldr(scratch2,
            FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2179,6 +2266,7 @@
 
   // Store the value.
   __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2187,7 +2275,7 @@
   ASSERT(ToRegister(instr->value()).is(r0));
 
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2204,10 +2292,20 @@
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ str(value, ContextOperand(context, instr->slot_index()));
-  if (instr->needs_write_barrier()) {
-    int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWrite(context, Operand(offset), value, scratch0());
+  MemOperand target = ContextOperand(context, instr->slot_index());
+  __ str(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              kLRHasBeenSaved,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
   }
 }
 
@@ -2228,7 +2326,7 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
   ASSERT(lookup.IsProperty() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2457,13 +2555,9 @@
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    // TODO(danno): If no hole check is required, there is no need to allocate
-    // elements into a temporary register, instead scratch can be used.
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
-  }
+  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  DeoptimizeIf(eq, instr->environment());
 
   __ vldr(result, elements, 0);
 }
@@ -2534,6 +2628,7 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -2694,7 +2789,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2860,6 +2955,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -3063,6 +3159,14 @@
 }
 
 
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3102,6 +3206,9 @@
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -3151,12 +3258,12 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ Drop(1);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -3210,19 +3317,36 @@
   }
 
   // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ str(value, FieldMemOperand(object, offset));
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, Operand(offset), value, scratch);
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          kLRHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   } else {
     __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
     __ str(value, FieldMemOperand(scratch, offset));
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(scratch, Operand(offset), value, object);
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          kLRHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   }
 }
@@ -3234,7 +3358,7 @@
 
   // Name is always in r2.
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3253,6 +3377,13 @@
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   Register scratch = scratch0();
 
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    __ tst(value, Operand(kSmiTagMask));
+    DeoptimizeIf(ne, instr->environment());
+  }
+
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3266,9 +3397,18 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ add(key, scratch, Operand(FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kLRHasBeenSaved,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
   }
 }
 
@@ -3369,6 +3509,7 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3383,13 +3524,55 @@
   ASSERT(ToRegister(instr->key()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
 
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  Label not_applicable;
+  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(from_map));
+  __ b(ne, &not_applicable);
+  __ mov(new_map_reg, Operand(to_map));
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(r2));
+    ASSERT(new_map_reg.is(r3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(r2));
+    ASSERT(new_map_reg.is(r3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
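A sketch of the dispatch DoTransitionElementsKind implements; the accessors
and the conversion helper below are illustrative, not the real runtime entry
points:

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS
    };
    struct JSObject;
    ElementsKind KindOf(JSObject* obj);          // hypothetical accessor
    void SetMapWithBarrier(JSObject* obj, ElementsKind to);
    void ConvertBackingStore(JSObject* obj, ElementsKind to);

    void Transition(JSObject* obj, ElementsKind to) {
      ElementsKind from = KindOf(obj);
      if (from == FAST_SMI_ONLY_ELEMENTS && to == FAST_ELEMENTS) {
        // Every smi is already a valid tagged value, so only the map needs
        // to change: an in-place store plus write barrier.
        SetMapWithBarrier(obj, to);
      } else {
        // smi->double and double->object change each element's
        // representation, so a builtin must rewrite the backing store.
        ConvertBackingStore(obj, to);
      }
    }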
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
@@ -3404,87 +3587,19 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ tst(result, Operand(kIsIndirectStringMask));
-  __ b(eq, &check_sequential);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ tst(result, Operand(kSlicedNotConsMask));
-  __ b(eq, &cons_string);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-  __ add(index, index, Operand(result, ASR, kSmiTagSize));
-  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
-  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(result, ip);
-  __ b(ne, deferred->entry());
-  // Get the first of the two strings and load its instance type.
-  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result, Operand(kStringRepresentationMask));
-  __ b(ne, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ tst(result, Operand(kStringEncodingMask));
-  __ b(ne, &ascii_string);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  __ add(result,
-         string,
-         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ ldrh(result, MemOperand(result, index, LSL, 1));
-  __ jmp(&done);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ add(result,
-         string,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ ldrb(result, MemOperand(result, index));
-
-  __ bind(&done);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
 
@@ -3527,6 +3642,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3598,6 +3714,7 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -3663,6 +3780,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3771,16 +3889,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Register input_reg = ToRegister(instr->InputAt(0));
   Register scratch1 = scratch0();
@@ -3863,6 +3971,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -4102,10 +4220,15 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
+  __ mov(r1, Operand(constant_elements));
   __ Push(r3, r2, r1);
 
   // Pick the right runtime function or stub to call.
@@ -4122,26 +4245,106 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        constant_elements_kind == FAST_DOUBLE_ELEMENTS
+        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(r2));
+  ASSERT(!result.is(r2));
+
+  // Increase the offset so that subsequent objects end up right after
+  // this one.
+  int current_offset = *offset;
+  int size = object->map()->instance_size();
+  *offset += size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  ASSERT(object->elements()->length() == 0 ||
+         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    __ ldr(r2, FieldMemOperand(source, i));
+    __ str(r2, FieldMemOperand(result, current_offset + i));
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ add(r2, result, Operand(*offset));
+      __ str(r2, FieldMemOperand(result, total_offset));
+      LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+      __ str(r2, FieldMemOperand(result, total_offset));
+    } else {
+      __ mov(r2, Operand(value));
+      __ str(r2, FieldMemOperand(result, total_offset));
+    }
+  }
+}
+
+
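EmitDeepCopy above depends on all objects of a literal being carved
back-to-back out of one allocation: result + *offset is a child's final
address before the child has been copied, so the parent's field can be
written eagerly. A standalone sketch of that bookkeeping on a toy graph:

    #include <cstddef>
    #include <vector>

    struct Node {
      size_t size;                  // bytes this object occupies
      std::vector<Node*> children;  // nested literal objects
      size_t placed_at;             // offset within the single allocation
    };

    void Place(Node* node, size_t* offset) {
      node->placed_at = *offset;
      *offset += node->size;  // reserve this object's slice first
      for (Node* child : node->children) {
        // *offset is now exactly where the child will land, mirroring the
        // str of (result + *offset) before the recursive EmitDeepCopy call.
        Place(child, offset);
      }
    }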
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ mov(r0, Operand(Smi::FromInt(size)));
+  __ push(r0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
   __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
   __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
-  __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+  __ mov(r2, Operand(constant_properties));
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  __ mov(r1, Operand(Smi::FromInt(flags)));
   __ Push(r4, r3, r2, r1);
 
   // Pick the right runtime function to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
@@ -4214,8 +4417,7 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(shared_info->language_mode());
     __ mov(r1, Operand(shared_info));
     __ push(r1);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -4248,8 +4450,9 @@
                                                   false_label,
                                                   input,
                                                   instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(true_block, false_block, final_branch_condition);
+  }
 }
 
 
@@ -4295,10 +4498,12 @@
     final_branch_condition = ne;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CompareObjectType(input, input, scratch,
-                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-    final_branch_condition = ge;
+    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+    __ b(eq, true_label);
+    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+    final_branch_condition = eq;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4317,9 +4522,7 @@
     final_branch_condition = eq;
 
   } else {
-    final_branch_condition = ne;
     __ b(false_label);
-    // A dead branch instruction will be generated after this point.
   }
 
   return final_branch_condition;
@@ -4430,6 +4633,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 0e34c9f..e9dd149 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -87,6 +87,7 @@
                                         SwVfpRegister flt_scratch,
                                         DoubleRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
   MemOperand ToMemOperand(LOperand* op) const;
   // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
@@ -140,8 +141,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -207,7 +208,7 @@
                                LInstruction* instr);
 
   // Generate a direct call to a known function.  Expects the function
-  // to be in edi.
+  // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
@@ -241,6 +242,7 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -262,7 +264,6 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         DoubleRegister result,
                         bool deoptimize_on_undefined,
@@ -271,8 +272,10 @@
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -282,6 +285,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
@@ -291,6 +301,13 @@
                                        Handle<Map> type,
                                        Handle<String> name);
 
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
@@ -378,16 +395,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -398,6 +419,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 7a1f802..4fc3b03 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -42,7 +42,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -395,40 +396,14 @@
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
-  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
+  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
 }
 
 
 void MacroAssembler::StoreRoot(Register source,
                                Heap::RootListIndex index,
                                Condition cond) {
-  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register address,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, ne, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Calculate page address.
-  Bfc(object, 0, kPageSizeBits);
-
-  // Calculate region number.
-  Ubfx(address, address, Page::kRegionSizeLog2,
-       kPageSizeBits - Page::kRegionSizeLog2);
-
-  // Mark region dirty.
-  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-  mov(ip, Operand(1));
-  orr(scratch, scratch, Operand(ip, LSL, address));
-  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
 }
 
 
@@ -443,38 +418,52 @@
 }
 
 
-// Will clobber 4 registers: object, offset, scratch, ip.  The
-// register 'object' contains a heap object pointer.  The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
-                                 Operand offset,
-                                 Register scratch0,
-                                 Register scratch1) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    LinkRegisterStatus lr_status,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch0, eq, &done);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
 
-  // Add offset into the object.
-  add(scratch0, object, offset);
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
 
-  // Record the actual write.
-  RecordWriteHelper(object, scratch0, scratch1);
+  add(dst, object, Operand(offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+    b(eq, &ok);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object,
+              dst,
+              value,
+              lr_status,
+              save_fp,
+              remembered_set_action,
+              OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Zap the clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -484,29 +473,103 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register scratch) {
+                                 Register value,
+                                 LinkRegisterStatus lr_status,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+  ASSERT(!address.is(cp) && !value.is(cp));
+
+  if (FLAG_debug_code) {
+    Label ok;
+    ldr(ip, MemOperand(address));
+    cmp(ip, value);
+    b(eq, &ok);
+    stop("Wrong address or value passed to RecordWrite");
+    bind(&ok);
+  }
 
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch, eq, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    tst(value, Operand(kSmiTagMask));
+    b(eq, &done);
+  }
+
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                eq,
+                &done);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                eq,
+                &done);
 
   // Record the actual write.
-  RecordWriteHelper(object, address, scratch);
+  if (lr_status == kLRHasNotBeenSaved) {
+    push(lr);
+  }
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(lr);
+  }
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Zap the clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(address, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+  }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address,
+                                         Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
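+  // The store buffer is a simple bump-pointer log of slot addresses: append
+  // the address of the updated slot and advance the top pointer.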
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(ip, Operand(store_buffer));
+  ldr(scratch, MemOperand(ip));
+  // Store pointer to buffer and increment buffer top.
+  str(address, MemOperand(scratch, kPointerSize, PostIndex));
+  // Write back new top of buffer.
+  str(scratch, MemOperand(ip));
+  // Check for end of buffer.  If the buffer has not overflowed, either
+  // fall through or return; otherwise call the overflow stub below.
+  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kFallThroughAtEnd) {
+    b(eq, &done);
+  } else {
+    ASSERT(and_then == kReturnAtEnd);
+    Ret(eq);
+  }
+  push(lr);
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(lr);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
   }
 }
 
@@ -961,6 +1024,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@@ -988,6 +1054,9 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -1011,6 +1080,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -1031,28 +1103,23 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  mov(r1, Operand(Handle<JSFunction>(function)));
+  mov(r1, Operand(function));
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
-  } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
 }
 
 
@@ -1090,56 +1157,58 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   mov(r0, Operand(0, RelocInfo::NONE));
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif
 
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+                                    HandlerType type,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // The pc (return address) is passed in register lr.
+  // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
+  // We will build up the handler from the bottom by pushing on the stack.
+  // First compute the state.
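+  // The state word packs the handler's kind together with its index into
+  // the code object's handler table (see StackHandler::KindField and
+  // StackHandler::OffsetField).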
+  unsigned state = StackHandler::OffsetField::encode(handler_index);
   if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      mov(r3, Operand(StackHandler::TRY_CATCH));
-    } else {
-      mov(r3, Operand(StackHandler::TRY_FINALLY));
-    }
-    stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
-    // Save the current handler as the next handler.
-    mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    ldr(r1, MemOperand(r3));
-    push(r1);
-    // Link this handler as the new current one.
-    str(sp, MemOperand(r3));
+    state |= (type == TRY_CATCH_HANDLER)
+        ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+        : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
   } else {
-    // Must preserve r0-r4, r5-r7 are available.
     ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for fp. We expect the code throwing an exception to check fp
-    // before dereferencing it to restore the context.
-    mov(r5, Operand(StackHandler::ENTRY));  // State.
-    mov(r6, Operand(Smi::FromInt(0)));  // Indicates no context.
-    mov(r7, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
-    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
-    // Save the current handler as the next handler.
-    mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    ldr(r6, MemOperand(r7));
-    push(r6);
-    // Link this handler as the new current one.
-    str(sp, MemOperand(r7));
+    state |= StackHandler::KindField::encode(StackHandler::ENTRY);
   }
+
+  // Set up the code object (r5) and the state (r6) for pushing.
+  mov(r5, Operand(CodeObject()));
+  mov(r6, Operand(state));
+
+  // Push the frame pointer, context, state, and code object.
+  if (try_location == IN_JAVASCRIPT) {
+    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
+  } else {
+    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
+    mov(ip, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
+    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+  }
+
+  // Link the current handler as the next handler.
+  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  ldr(r5, MemOperand(r6));
+  push(r5);
+  // Set this new handler as the current one.
+  str(sp, MemOperand(r6));
 }
 
 
@@ -1152,42 +1221,50 @@
 }
 
 
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // r0 = exception, r1 = code object, r2 = state.
+  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
+  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
+  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
+  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
+  add(pc, r1, Operand(r2, ASR, kSmiTagSize));  // Jump.
+}
+
+
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // r0 is expected to hold the exception.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in r0.
   if (!value.is(r0)) {
     mov(r0, value);
   }
-
-  // Drop the sp to the top of the handler.
+  // Drop the stack pointer to the top of the top handler.
   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
-
   // Restore the next handler.
   pop(r2);
   str(r2, MemOperand(r3));
 
-  // Restore context and frame pointer, discard state (r3).
-  ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());
+  // Get the code object (r1) and state (r2).  Restore the context and frame
+  // pointer.
+  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
-  // of them.
-  cmp(r3, Operand(StackHandler::ENTRY));
+  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+  // or cp.
+  tst(cp, cp);
   str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 
-#ifdef DEBUG
-  if (emit_debug_code()) {
-    mov(lr, Operand(pc));
-  }
-#endif
-  pop(pc);
+  JumpToHandlerEntry();
 }
 
 
@@ -1196,41 +1273,16 @@
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // r0 is expected to hold the exception.
-  if (!value.is(r0)) {
-    mov(r0, value);
-  }
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // Drop sp to the top stack handler.
-  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-  ldr(sp, MemOperand(r3));
-
-  // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  ldr(r2, MemOperand(sp, kStateOffset));
-  cmp(r2, Operand(StackHandler::ENTRY));
-  b(eq, &done);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  ldr(sp, MemOperand(sp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
-
-  // Set the top handler address to next handler past the current ENTRY handler.
-  pop(r2);
-  str(r2, MemOperand(r3));
-
+  // The exception is expected in r0.
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
-    ExternalReference external_caught(
-        Isolate::kExternalCaughtExceptionAddress, isolate());
+    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                      isolate());
     mov(r0, Operand(false, RelocInfo::NONE));
     mov(r2, Operand(external_caught));
     str(r0, MemOperand(r2));
@@ -1241,22 +1293,34 @@
     mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));
     str(r0, MemOperand(r2));
+  } else if (!value.is(r0)) {
+    mov(r0, value);
   }
 
-  // Stack layout at this point. See also StackHandlerConstants.
-  // sp ->   state (ENTRY)
-  //         cp
-  //         fp
-  //         lr
+  // Drop the stack pointer to the top of the top stack handler.
+  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  ldr(sp, MemOperand(r3));
 
-  // Restore context and frame pointer, discard state (r2).
-  ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
-#ifdef DEBUG
-  if (emit_debug_code()) {
-    mov(lr, Operand(pc));
-  }
-#endif
-  pop(pc);
+  // Unwind the handlers until the ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind);
+  bind(&fetch_next);
+  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::ENTRY == 0);
+  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
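+  // ENTRY == 0, so any non-zero kind field means this is not an entry
+  // handler; keep unwinding in that case.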
+  tst(r2, Operand(StackHandler::KindField::kMask));
+  b(ne, &fetch_next);
+
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(r2);
+  str(r2, MemOperand(r3));
+  // Get the code object (r1) and state (r2).  Clear the context and frame
+  // pointer (0 was saved in the handler).
+  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
+
+  JumpToHandlerEntry();
 }
 
 
@@ -1337,34 +1401,6 @@
 }
 
 
-void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
-  // First of all we assign the hash seed to scratch.
-  LoadRoot(scratch, Heap::kHashSeedRootIndex);
-  SmiUntag(scratch);
-
-  // Xor original key with a seed.
-  eor(t0, t0, Operand(scratch));
-
-  // Compute the hash code from the untagged key.  This must be kept in sync
-  // with ComputeIntegerHash in utils.h.
-  //
-  // hash = ~hash + (hash << 15);
-  mvn(scratch, Operand(t0));
-  add(t0, scratch, Operand(t0, LSL, 15));
-  // hash = hash ^ (hash >> 12);
-  eor(t0, t0, Operand(t0, LSR, 12));
-  // hash = hash + (hash << 2);
-  add(t0, t0, Operand(t0, LSL, 2));
-  // hash = hash ^ (hash >> 4);
-  eor(t0, t0, Operand(t0, LSR, 4));
-  // hash = hash * 2057;
-  mov(scratch, Operand(2057));
-  mul(t0, t0, scratch);
-  // hash = hash ^ (hash >> 16);
-  eor(t0, t0, Operand(t0, LSR, 16));
-}
-
-
 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                               Register elements,
                                               Register key,
@@ -1394,10 +1430,26 @@
   // t2 - used for the index into the dictionary.
   Label done;
 
-  GetNumberHash(t0, t1);
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  mvn(t1, Operand(t0));
+  add(t0, t1, Operand(t0, LSL, 15));
+  // hash = hash ^ (hash >> 12);
+  eor(t0, t0, Operand(t0, LSR, 12));
+  // hash = hash + (hash << 2);
+  add(t0, t0, Operand(t0, LSL, 2));
+  // hash = hash ^ (hash >> 4);
+  eor(t0, t0, Operand(t0, LSR, 4));
+  // hash = hash * 2057;
+  mov(t1, Operand(2057));
+  mul(t0, t0, t1);
+  // hash = hash ^ (hash >> 16);
+  eor(t0, t0, Operand(t0, LSR, 16));
 
   // Compute the capacity mask.
-  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
   mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
   sub(t1, t1, Operand(1));
 
@@ -1408,17 +1460,17 @@
     mov(t2, t0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+      add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
     }
     and_(t2, t2, Operand(t1));
 
     // Scale the index by multiplying by the element size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    ASSERT(NumberDictionary::kEntrySize == 3);
     add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
 
     // Check if the key is identical to the name.
     add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
-    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
+    ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
     cmp(key, Operand(ip));
     if (i != kProbes - 1) {
       b(eq, &done);
@@ -1431,14 +1483,14 @@
   // Check that the value is a normal property.
   // t2: elements + (index * kPointerSize)
   const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   ldr(t1, FieldMemOperand(t2, kDetailsOffset));
   tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
   b(ne, miss);
 
   // Get the value at the masked, scaled index and return.
   const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+      NumberDictionary::kElementsStartOffset + kPointerSize;
   ldr(result, FieldMemOperand(t2, kValueOffset));
 }
 
@@ -1548,6 +1600,7 @@
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
+  ASSERT(!object_size.is(ip));
   ASSERT(!result.is(ip));
   ASSERT(!scratch1.is(ip));
   ASSERT(!scratch2.is(ip));
@@ -1805,13 +1858,127 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
   b(hi, fail);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  b(ls, fail);
+  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+  b(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Register scratch,
+                                              Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  b(hi, fail);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+                                                 Register key_reg,
+                                                 Register receiver_reg,
+                                                 Register elements_reg,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register scratch3,
+                                                 Register scratch4,
+                                                 Label* fail) {
+  Label smi_value, maybe_nan, have_double_value, is_nan, done;
+  Register mantissa_reg = scratch2;
+  Register exponent_reg = scratch3;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  CheckMap(value_reg,
+           scratch1,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have an exponent word that is greater,
+  // as a signed comparison, than 0x7ff00000.
+  mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+  ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  cmp(exponent_reg, scratch1);
+  b(ge, &maybe_nan);
+
+  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  bind(&have_double_value);
+  add(scratch1, elements_reg,
+      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  str(exponent_reg, FieldMemOperand(scratch1, offset));
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity.  If the fraction is not zero, it's NaN;
+  // otherwise it's Infinity and the non-NaN code path applies.
+  b(gt, &is_nan);
+  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  cmp(mantissa_reg, Operand(0));
+  b(eq, &have_double_value);
+  bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
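+  // An arbitrary NaN bit pattern could collide with the pattern that
+  // FixedDoubleArray uses to represent the hole, so NaNs are canonicalized
+  // before being stored.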
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  jmp(&have_double_value);
+
+  bind(&smi_value);
+  add(scratch1, elements_reg,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  add(scratch1, scratch1,
+      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch1 now holds the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(this,
+                                          untagged_value,
+                                          destination,
+                                          d0,
+                                          mantissa_reg,
+                                          exponent_reg,
+                                          scratch4,
+                                          s2);
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    vstr(d0, scratch1, 0);
+  } else {
+    str(mantissa_reg, MemOperand(scratch1, 0));
+    str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+  }
+  bind(&done);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
@@ -1862,7 +2029,8 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -1870,6 +2038,16 @@
   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
   b(ne, miss);
 
+  if (miss_on_bound_function) {
+    ldr(scratch,
+        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    ldr(scratch,
+        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    tst(scratch,
+        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+    b(ne, miss);
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -1907,47 +2085,24 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Handle<Code> code(Code::cast(result));
-  Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
-  return result;
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -2010,14 +2165,10 @@
   mov(pc, lr);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result
-      = TryTailCallExternalReference(
-          ExternalReference(Runtime::kPromoteScheduledException, isolate()),
-          0,
-          1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+      0,
+      1);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -2029,8 +2180,12 @@
       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
   jmp(&leave_exit_frame);
+}
 
-  return result;
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
 }
 
 
@@ -2429,8 +2584,7 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1);
-  stub.SaveDoubles();
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
 
@@ -2457,17 +2611,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  mov(r0, Operand(num_arguments));
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -2488,21 +2631,12 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& builtin) {
-#if defined(__thumb__)
-  // Thumb mode builtin.
-  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
-  mov(r1, Operand(builtin));
-  CEntryStub stub(1);
-  return TryTailCallStub(&stub);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   GetBuiltinEntry(r2, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(r2));
@@ -2634,14 +2768,20 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   mov(r0, Operand(p0));
   push(r0);
   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   push(r0);
-  CallRuntime(Runtime::kAbort, 2);
+  // Abort is always allowed, even where stub calls are otherwise restricted.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // will not return here
   if (is_const_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -2942,6 +3082,19 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
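+  // Branch to the loop condition first so that an empty range
+  // (start_offset == end_offset) stores nothing.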
+  b(&entry);
+  bind(&loop);
+  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+  bind(&entry);
+  cmp(start_offset, end_offset);
+  b(lt, &loop);
+}
+
+
 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                        Register source,  // Input.
                                        Register scratch) {
@@ -2953,8 +3106,10 @@
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   clz(zeros, source);  // This instruction is only supported after ARM5.
 #else
-  mov(zeros, Operand(0, RelocInfo::NONE));
+  // The order of the next two lines is important: the zeros register
+  // can be the same as the source register.
   Move(scratch, source);
+  mov(zeros, Operand(0, RelocInfo::NONE));
   // Top 16.
   tst(scratch, Operand(0xffff0000));
   add(zeros, zeros, Operand(16), LeaveCC, eq);
@@ -3101,23 +3256,15 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_reg_arguments,
                                    int num_double_arguments) {
-  CallCFunctionHelper(no_reg,
-                      function,
-                      ip,
-                      num_reg_arguments,
-                      num_double_arguments);
+  mov(ip, Operand(function));
+  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                     Register scratch,
-                                     int num_reg_arguments,
-                                     int num_double_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_reg_arguments,
-                      num_double_arguments);
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
 }
 
 
@@ -3128,17 +3275,15 @@
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   Register scratch,
                                    int num_arguments) {
-  CallCFunction(function, scratch, num_arguments, 0);
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
-                                         ExternalReference function_reference,
-                                         Register scratch,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -3162,10 +3307,6 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-  if (function.is(no_reg)) {
-    mov(scratch, Operand(function_reference));
-    function = scratch;
-  }
   Call(function);
   int stack_passed_arguments = CalculateStackPassedWords(
       num_reg_arguments, num_double_arguments);
@@ -3197,6 +3338,185 @@
 }
 
 
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met) {
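+  // The MemoryChunk header sits at the page-aligned base of the object's
+  // address; mask off the low bits to reach it and load the flags word.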
+  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  tst(scratch, Operand(mask));
+  b(cc, condition_met);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
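+  // A color is encoded in two consecutive mark bits.  Test the first bit,
+  // then shift the mask to the second bit; if the pair straddles a cell
+  // boundary, the second bit is bit 0 of the next bitmap cell.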
+
+  Label other_color, word_boundary;
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  tst(ip, Operand(mask_scratch));
+  b(first_bit == 1 ? eq : ne, &other_color);
+  // Shift left 1 by adding.
+  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
+  b(eq, &word_boundary);
+  tst(ip, Operand(mask_scratch));
+  b(second_bit == 1 ? ne : eq, has_color);
+  jmp(&other_color);
+
+  bind(&word_boundary);
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+  tst(ip, Operand(1));
+  b(second_bit == 1 ? ne : eq, has_color);
+  bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+                                      Register scratch,
+                                      Label* not_data_object) {
+  Label is_data_object;
+  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  b(eq, &is_data_object);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  b(ne, not_data_object);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
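+  // bitmap_reg first gets the page start, then has the offset of this
+  // address's bitmap cell added; mask_reg receives the bit index within
+  // that cell and is finally turned into a one-bit mask.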
+  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
+  mov(ip, Operand(1));
+  mov(mask_reg, Operand(ip, LSL, mask_reg));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Register load_scratch,
+    Label* value_is_white_and_not_data) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there, we only need to check one bit.
+  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  tst(mask_scratch, load_scratch);
+  b(ne, &done);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // LSL may overflow, making the check conservative.
+    tst(load_scratch, Operand(mask_scratch, LSL, 1));
+    b(eq, &ok);
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;  // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object;
+
+  // Check for a heap number.
+  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
+  b(eq, &is_data_object);
+
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = load_scratch;
+  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  b(ne, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  tst(instance_type, Operand(kExternalStringTag));
+  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
+  b(ne, &is_data_object);
+
+  // Sequential string, either ASCII or UC16.
+  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+  // getting the length multiplied by 2.
+  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
+  tst(instance_type, Operand(kStringEncodingMask));
+  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
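+  // Add the header size and round up to the object alignment to obtain
+  // the allocated size.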
+  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, length, Operand(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  orr(ip, ip, Operand(mask_scratch));
+  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  add(ip, ip, Operand(length));
+  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
+
+
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
   Usat(output_reg, 8, Operand(input_reg));
 }
@@ -3246,6 +3566,17 @@
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 0546e6a..2725883 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -29,6 +29,7 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -38,12 +39,12 @@
 // Static helper functions
 
 // Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
+inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
 }
 
 
-static inline Operand SmiUntagOperand(Register object) {
+inline Operand SmiUntagOperand(Register object) {
   return Operand(object, ASR, kSmiTagSize);
 }
 
@@ -51,7 +52,7 @@
 
 // Give alias names to registers
 const Register cp = { 8 };  // JavaScript context pointer
-const Register kRootRegister = { 10 };  // Roots array pointer.
+const Register roots = { 10 };  // Roots array pointer.
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -79,6 +80,14 @@
 };
 
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -157,40 +166,126 @@
                  Heap::RootListIndex index,
                  Condition cond = al);
 
+  // ---------------------------------------------------------------------------
+  // GC Support
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise
-                  Label* branch);
+  void IncrementalMarkingRecordWriteHelper(Register object,
+                                           Register value,
+                                           Register address);
 
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
-  // For the page containing |object| mark the region covering [address]
-  // dirty. The object address must be in the first 8K of an allocated page.
-  void RecordWriteHelper(Register object,
-                         Register address,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
 
-  // For the page containing |object| mark the region covering
-  // [object+offset] dirty. The object address must be in the first 8K
-  // of an allocated page.  The 'scratch' registers are used in the
-  // implementation and all 3 registers are clobbered by the
-  // operation, as well as the ip register. RecordWrite updates the
-  // write barrier even when storing smis.
-  void RecordWrite(Register object,
-                   Operand offset,
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
                    Register scratch0,
-                   Register scratch1);
+                   Register scratch1,
+                   Label* on_black);
 
-  // For the page containing |object| mark the region covering
-  // [address] dirty. The object address must be in the first 8K of an
-  // allocated page.  All 3 registers are clobbered by the operation,
-  // as well as the ip register. RecordWrite updates the write barrier
-  // even when storing smis.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register scratch);
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     lr_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
   // Push a handle.
   void Push(Handle<Object> handle);
@@ -225,8 +320,11 @@
   }
 
   // Push four registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2,
-            Register src3, Register src4, Condition cond = al) {
+  void Push(Register src1,
+            Register src2,
+            Register src3,
+            Register src4,
+            Condition cond = al) {
     ASSERT(!src1.is(src2));
     ASSERT(!src2.is(src3));
     ASSERT(!src1.is(src3));
@@ -265,6 +363,57 @@
     }
   }
 
+  // Pop three registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+      } else {
+        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
+        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+      }
+    } else {
+      Pop(src2, src3, cond);
+      str(src1, MemOperand(sp, 4, PostIndex), cond);
+    }
+  }
+
+  // Pop four registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1,
+           Register src2,
+           Register src3,
+           Register src4,
+           Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    ASSERT(!src1.is(src4));
+    ASSERT(!src2.is(src4));
+    ASSERT(!src3.is(src4));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        if (src3.code() > src4.code()) {
+          ldm(ia_w,
+              sp,
+              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+              cond);
+        } else {
+          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
+          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+        }
+      } else {
+        Pop(src3, src4, cond);
+        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+      }
+    } else {
+      Pop(src2, src3, src4, cond);
+      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+    }
+  }
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -318,16 +467,6 @@
             const double imm,
             const Condition cond = al);
 
-
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter exit frame.
   // stack_space - extra stack space, used for alignment before call to C.
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -350,12 +489,6 @@
                                     Register map,
                                     Register scratch);
 
-  void InitializeRootRegister() {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    mov(kRootRegister, Operand(roots_address));
-  }
-
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -387,7 +520,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       CallKind call_kind);
@@ -416,9 +549,9 @@
   // Exception handling
 
   // Push a new try handler and link into try handler chain.
-  // The return address must be passed in register lr.
-  // On exit, r0 contains TOS (code slot).
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  void PushTryHandler(CodeLocation try_location,
+                      HandlerType type,
+                      int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
@@ -441,7 +574,6 @@
                               Register scratch,
                               Label* miss);
 
-  void GetNumberHash(Register t0, Register scratch);
 
   void LoadFromNumberDictionary(Label* miss,
                                 Register elements,
@@ -576,6 +708,13 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -587,7 +726,8 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
@@ -615,6 +755,31 @@
                          Register scratch,
                          Label* fail);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Register scratch,
+                               Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Register scratch,
+                                Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements, otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register value_reg,
+                                   Register key_reg,
+                                   Register receiver_reg,
+                                   Register elements_reg,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   Register scratch4,
+                                   Label* fail);
+
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
@@ -761,20 +926,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
-                                               Condition cond = al);
-
   // Call a runtime routine.
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -793,12 +947,6 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC
-  // failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -837,28 +985,25 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
   void CallCFunction(ExternalReference function,
                      int num_reg_arguments,
                      int num_double_arguments);
-  void CallCFunction(Register function, Register scratch,
+  void CallCFunction(Register function,
                      int num_reg_arguments,
                      int num_double_arguments);
 
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
-  // Calls an API function. Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions. Restores context.
-  // stack_space - space to be unwound on exit (includes the call js
-  // arguments space and the additional space allocated for the fast call).
-  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.
+  // stack_space - space to be unwound on exit (includes the call js
+  // arguments space and the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
@@ -909,6 +1054,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // EABI variant for double arguments in use.
   bool use_eabi_hardfloat() {
@@ -1055,10 +1203,12 @@
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
  private:
   void CallCFunctionHelper(Register function,
-                           ExternalReference function_reference,
-                           Register scratch,
                            int num_reg_arguments,
                            int num_double_arguments);
 
@@ -1074,16 +1224,29 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Leaves addr_reg
+  // unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
+
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
@@ -1091,6 +1254,7 @@
 
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1136,12 +1300,12 @@
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
-static MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index) {
   return MemOperand(context, Context::SlotOffset(index));
 }
 
 
-static inline MemOperand GlobalObjectOperand()  {
+inline MemOperand GlobalObjectOperand()  {
   return ContextOperand(cp, Context::GLOBAL_INDEX);
 }
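Dropping `static` in favor of plain `inline` here matters because this header
is pulled into many translation units: a file-static definition gives every
.cc file its own private copy (plus unused-function warnings where it is not
called), while `inline` lets the linker fold all copies into one. A minimal
sketch of the distinction, with a hypothetical helper:

  // In a widely included header, prefer this:
  inline int SlotOffset(int index) { return index * 4 + 8; }

  // over this, which duplicates the function (and its warnings) into
  // every translation unit that includes the header:
  // static int SlotOffset(int index) { return index * 4 + 8; }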
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index cd76edb..b212f9f 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -371,9 +371,12 @@
     // Isolate.
     __ mov(r3, Operand(ExternalReference::isolate_address()));
 
-    ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(function, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
 
     // Check if function returned non-zero for success or zero for failure.
     __ cmp(r0, Operand(0, RelocInfo::NONE));
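AllowExternalCallThatCantCauseGC (like the FrameScope used elsewhere in this
patch) follows the usual RAII guard shape: construction flips an
assembler-side flag for the duration of the block, destruction restores it. A
minimal sketch of that pattern, with hypothetical names and assuming a simple
bool flag on the assembler:

  // RAII guard: external calls are permitted only while a scope object
  // of this type is alive (names are illustrative, not V8's).
  struct Assembler { bool allow_external_call = false; };

  class AllowExternalCallScope {
   public:
    explicit AllowExternalCallScope(Assembler* masm)
        : masm_(masm), old_(masm->allow_external_call) {
      masm_->allow_external_call = true;   // Enable for this block.
    }
    ~AllowExternalCallScope() {
      masm_->allow_external_call = old_;   // Restore on scope exit.
    }
   private:
    Assembler* masm_;
    bool old_;
  };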
@@ -611,6 +614,12 @@
 
   // Entry code:
   __ bind(&entry_label_);
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL,
+  // no code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   // Push arguments
   // Save callee-save registers.
   // Start new stack frame.
@@ -1102,6 +1111,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC.  That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
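The new branch guards against a subtle GC interaction: short-circuiting a
ConsString leaves the character data (and hence start_address) where it was,
but replaces the String object the handle refers to, so the raw pointer cached
in the regexp frame goes stale. The defensive shape of the fix, sketched with
hypothetical types:

  // After any operation that may run a GC, refresh a cached raw pointer
  // from its handle slot, even if the data it described did not move.
  struct RegExpFrame { const void* cached_subject; };

  void RefreshAfterGC(RegExpFrame* frame, void* const* handle_slot) {
    if (frame->cached_subject != *handle_slot) {
      frame->cached_subject = *handle_slot;  // GC updated the handle.
    }
  }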
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6af5355..0525529 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -53,7 +53,7 @@
 // code.
 class ArmDebugger {
  public:
-  explicit ArmDebugger(Simulator* sim);
+  explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
   ~ArmDebugger();
 
   void Stop(Instruction* instr);
@@ -84,11 +84,6 @@
 };
 
 
-ArmDebugger::ArmDebugger(Simulator* sim) {
-  sim_ = sim;
-}
-
-
 ArmDebugger::~ArmDebugger() {
 }
 
@@ -296,6 +291,13 @@
     if (line == NULL) {
       break;
     } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_.
+        sim_->set_last_debugger_input(line);
+      }
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
       int argc = SScanF(line,
@@ -611,7 +613,6 @@
         PrintF("Unknown command: %s\n", cmd);
       }
     }
-    DeleteArray(line);
   }
 
   // Add all the breakpoints back to stop execution and enter the debugger
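Taken together with the removal of DeleteArray(line) above, the earlier hunk
gives the debugger shell "repeat last command on bare Enter" behavior: a fresh
line either replaces the remembered one (ownership passing to the simulator,
which now frees it in set_last_debugger_input) or is discarded in favor of
it. A standalone sketch of the same loop logic, using std::string in place of
the simulator's heap buffers:

  #include <string>

  // Returns the command to execute: a bare newline repeats the previous
  // command; anything else becomes the new remembered command.
  const std::string& NextCommand(const std::string& line,
                                 std::string* last_input) {
    if (line == "\n" && !last_input->empty()) {
      return *last_input;        // Bare Enter: repeat last command.
    }
    *last_input = line;          // Otherwise remember this one.
    return *last_input;
  }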
@@ -645,6 +646,12 @@
 }
 
 
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
+
+
 void Simulator::FlushICache(v8::internal::HashMap* i_cache,
                             void* start_addr,
                             size_t size) {
@@ -781,6 +788,8 @@
   registers_[pc] = bad_lr;
   registers_[lr] = bad_lr;
   InitializeCoverage();
+
+  last_debugger_input_ = NULL;
 }
 
 
@@ -1268,9 +1277,9 @@
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 512 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 256;
+  return reinterpret_cast<uintptr_t>(stack_) + 512;
 }
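Doubling the safety margin means the simulator reports a stack limit 512 bytes
above the true bottom of its stack, so code that passes the limit check and
then pushes a handful of values still stays inside the allocation. The check
this limit feeds, sketched:

  #include <cstdint>

  const uintptr_t kSafetyMargin = 512;  // Mirrors the new margin above.

  // The stack grows downward; trip the overflow check while there is
  // still kSafetyMargin bytes of headroom left to push values into.
  bool StackOverflowImminent(uintptr_t sp, uintptr_t stack_base) {
    return sp < stack_base + kSafetyMargin;
  }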
 
 
@@ -1618,6 +1627,8 @@
   ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
 
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+  // Catch null pointers a little earlier.
+  ASSERT(start_address > 8191 || start_address < 0);
   int reg = 0;
   while (rlist != 0) {
     if ((rlist & 1) != 0) {
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 391ef69..585f1e0 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -194,6 +194,10 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
   // ICache checking.
   static void FlushICache(v8::internal::HashMap* i_cache, void* start,
                           size_t size);
@@ -360,6 +364,9 @@
   bool pc_modified_;
   int icount_;
 
+  // Debugger input.
+  char* last_debugger_input_;
+
   // Icache simulation
   v8::internal::HashMap* i_cache_;
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index f856592..d229ae6 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -95,13 +95,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register scratch0,
-    Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register scratch0,
+                                             Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -138,20 +137,15 @@
   __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
 
-  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      receiver,
-      properties,
-      name,
-      scratch1);
-  if (result->IsFailure()) return result;
-
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     receiver,
+                                                     properties,
+                                                     name,
+                                                     scratch1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  return result;
 }
 
 
@@ -238,7 +232,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -246,8 +243,8 @@
   __ cmp(prototype, ip);
   __ b(ne, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -259,8 +256,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
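The recurring change throughout this file (JSObject* becoming
Handle<JSObject>, comparisons written as *handle != raw_pointer) is the
handlification needed once stub compilation can trigger GC: a handle names a
slot that the collector rewrites when it moves the object, where a raw pointer
would silently dangle. A deliberately minimal sketch of the idea, far simpler
than V8's HandleScope machinery:

  // A Handle stores the address of a GC-managed slot rather than the
  // object itself; every dereference re-reads the slot, so it stays
  // valid across object motion (illustrative only).
  template <typename T>
  class Handle {
   public:
    explicit Handle(T** location) : location_(location) {}
    T* operator*() const { return *location_; }
    T* operator->() const { return *location_; }
   private:
    T** location_;  // Slot updated by the garbage collector.
  };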
@@ -367,9 +366,9 @@
 // may be clobbered.  Upon branch to miss_label, the receiver and name
 // registers have their original values.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
@@ -395,11 +394,11 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
-    __ mov(r2, Operand(Handle<Map>(transition)));
+    __ mov(r2, Operand(transition));
     __ Push(r2, r0);
     __ TailCallExternalReference(
         ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -409,10 +408,10 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ mov(ip, Operand(Handle<Map>(transition)));
+    __ mov(ip, Operand(transition));
     __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
   }
 
@@ -431,7 +430,13 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+    __ mov(name_reg, r0);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kLRHasNotBeenSaved,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -444,7 +449,13 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+    __ mov(name_reg, r0);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kLRHasNotBeenSaved,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register r0).
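RecordWriteField replaces the old RecordWrite calls because the incremental
marker and the new store buffer must now be told about every pointer store
into an object field. A heavily simplified sketch of what a generational
write barrier does (not V8's actual implementation; InNewSpace here is a stub
predicate):

  #include <unordered_set>

  std::unordered_set<void**> remembered_set;  // Slots holding old-to-new pointers.

  bool InNewSpace(void* p) { return p != nullptr; }  // Stub predicate.

  // After storing 'value' into 'holder' at 'offset', record the slot if
  // it may now hold an old-to-new pointer the next scavenge must visit.
  void WriteField(char* holder, int offset, void* value) {
    void** slot = reinterpret_cast<void**>(holder + offset);
    *slot = value;                    // The store itself.
    if (InNewSpace(value)) {
      remembered_set.insert(slot);    // The barrier.
    }
  }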
@@ -455,20 +466,15 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 static void GenerateCallFunction(MacroAssembler* masm,
-                                 Object* object,
+                                 Handle<Object> object,
                                  const ParameterCount& arguments,
                                  Label* miss,
                                  Code::ExtraICState extra_ic_state) {
@@ -501,12 +507,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ mov(scratch, Operand(Handle<Object>(interceptor)));
+  __ mov(scratch, Operand(interceptor));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -515,11 +521,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -532,6 +539,7 @@
   __ CallStub(&stub);
 }
 
+
 static const int kFastApiCallArguments = 3;
 
 // Reserves space for the extra arguments to FastHandleApiCall in the
@@ -553,7 +561,7 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       int argc) {
   // ----------- S t a t e -------------
@@ -566,18 +574,18 @@
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ mov(r5, Operand(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ mov(r5, Operand(function));
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ Move(r0, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ Move(r0, api_call_info);
     __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
   } else {
-    __ Move(r6, Handle<Object>(call_data));
+    __ Move(r6, call_data);
   }
   // Store js function and call data.
   __ stm(ib, sp, r5.bit() | r6.bit());
@@ -586,11 +594,9 @@
   // (refer to layout above).
   __ add(r2, sp, Operand(2 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiStackSpace = 4;
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // r0 = v8::Arguments&
@@ -608,17 +614,18 @@
   __ mov(ip, Operand(0));
   __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated). Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
-  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  AllowExternalCallThatCantCauseGC scope(masm);
+
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -630,86 +637,63 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
-
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
-
     Counters* counters = masm->isolate()->counters();
-
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-       !lookup->holder()->IsGlobalObject()) {
-     depth1 =
-         optimization.GetPrototypeDepthOfExpectedType(object,
-                                                      interceptor_holder);
-     if (depth1 == kInvalidProtoDepth) {
-       depth2 =
-           optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                        lookup->holder());
-     }
-     can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                            (depth2 != kInvalidProtoDepth);
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
+      }
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                      scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -722,9 +706,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -737,10 +721,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -751,10 +736,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiDirectCall(masm,
-                                                      optimization,
-                                                      arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -775,64 +757,53 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
-
-    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
-
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
-
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
         5);
-
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ Push(holder, name_);
-
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
-
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
-
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(holder, name_);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+    }
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch);
@@ -849,52 +820,42 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Operand(Handle<Object>(cell)));
+  __ mov(scratch, Operand(cell));
   __ ldr(scratch,
          FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   __ cmp(scratch, ip);
   __ b(ne, miss);
-  return cell;
 }
 
+
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
     if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = GenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
     }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
   }
-  return NULL;
 }
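GenerateCheckPropertyCells walks the prototype chain from object up to, but
not including, holder, emitting a cell check only for the global objects it
passes. The traversal shape, as a small C++ sketch with hypothetical types:

  // Visit every object on the chain from 'object' up to, but not
  // including, 'holder'; act only on the global ones.
  struct Obj {
    Obj* prototype;
    bool is_global;
  };

  template <typename Visitor>
  void ForEachGlobalUpToHolder(Obj* object, Obj* holder, Visitor visit) {
    for (Obj* current = object; current != holder;
         current = current->prototype) {
      if (current->is_global) visit(current);  // e.g. check its cell.
    }
  }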
 
 
@@ -1008,13 +969,13 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -1032,83 +993,52 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
-
-      // Branch on the result of the map check.
-      __ b(ne, miss);
-
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-        // Restore scratch register to be the map of the object.  In the
-        // new space case below, we load the prototype from the map in
-        // the scratch register.
-        __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // from now the object is in holder_reg
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Check the map of the current object.
+      Handle<Map> current_map(current->map());
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+      __ cmp(scratch1, Operand(current_map));
       // Branch on the result of the map check.
       __ b(ne, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, Operand(Handle<JSObject>(prototype)));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (heap()->InNewSpace(*prototype)) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ mov(reg, Operand(prototype));
+      }
     }
 
     if (save_at_depth == depth) {
@@ -1119,143 +1049,131 @@
     current = prototype;
   }
 
+  // Log the check depth.
+  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+
   // Check the holder map.
   __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
   __ cmp(scratch1, Operand(Handle<Map>(current->map())));
   __ b(ne, miss);
 
-  // Log the check depth.
-  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  };
+  }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = GenerateCheckPropertyCells(masm(),
-                                                   object,
-                                                   holder,
-                                                   name,
-                                                   scratch1,
-                                                   miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
   GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<Object> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name,
-                  miss);
+  CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ mov(r0, Operand(Handle<Object>(value)));
+  __ mov(r0, Operand(value));
   __ Ret();
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  Handle<AccessorInfo> callback_handle(callback);
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ Move(scratch3, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch3, callback);
     __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ Move(scratch3, Handle<Object>(callback_handle->data()));
+    __ Move(scratch3, Handle<Object>(callback->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(r0, sp);  // r0 = Handle<String>
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   const int kApiStackSpace = 1;
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
+
   // Create AccessorInfo instance on the stack above the exit frame with
   // scratch2 (internal::Object **args_) as the data.
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
   __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = 4;
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1271,9 +1189,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1288,48 +1206,45 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ Push(receiver, holder_reg, name_reg);
+      } else {
+        __ Push(holder_reg, name_reg);
+      }
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see the caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+      // Check if the interceptor provided a value for the property.  If
+      // so, return immediately.
+      Label interceptor_failed;
+      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ cmp(r0, scratch1);
+      __ b(eq, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ Push(receiver, holder_reg, name_reg);
-    } else {
-      __ Push(holder_reg, name_reg);
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+      // Leave the internal frame.
     }
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ cmp(r0, scratch1);
-    __ b(eq, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ Ret();
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1341,21 +1256,21 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), r0, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ Ret();
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ Move(scratch2, Handle<AccessorInfo>(callback));
+      __ Move(scratch2, callback);
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1392,17 +1307,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(r2, Operand(Handle<String>(name)));
+    __ cmp(r2, Operand(name));
     __ b(ne, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1415,7 +1330,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(r0, miss);
   }
 
@@ -1424,15 +1339,16 @@
 }
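
Throughout this file, raw pointer comparisons like object != holder become
!object.is_identical_to(holder): two distinct handles can point at the same
object, so equality must go through the handle's location. A minimal sketch,
assuming a simplified Handle that is just a pointer to a slot (not v8's real
implementation):

    #include <cassert>

    template <typename T>
    struct Handle {
      T** location;
      bool is_identical_to(const Handle<T>& other) const {
        return *location == *other.location;  // compare referents, not slots
      }
    };

    int main() {
      int object = 42;
      int* slot_a = &object;
      int* slot_b = &object;
      Handle<int> a{&slot_a};
      Handle<int> b{&slot_b};
      assert(a.location != b.location);  // distinct handles...
      assert(a.is_identical_to(b));      // ...same underlying object
    }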
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r3, Operand(cell));
   __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1446,30 +1362,26 @@
     __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
     __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
     __ cmp(r4, r3);
-    __ b(ne, miss);
   } else {
-    __ cmp(r1, Operand(Handle<JSFunction>(function)));
-    __ b(ne, miss);
+    __ cmp(r1, Operand(function));
   }
+  __ b(ne, miss);
 }
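
A sketch of the invariant behind the InNewSpace branch above, assuming (as
the comment states) that a new-space function may move while its
SharedFunctionInfo provides a stable identity; the types are stand-ins:

    #include <cassert>

    struct SharedFunctionInfo {};
    struct JSFunction { const SharedFunctionInfo* shared; };

    bool CellStillHoldsExpectedFunction(const JSFunction* in_cell,
                                        const JSFunction* expected,
                                        bool expected_in_new_space) {
      if (expected_in_new_space) {
        // The function's address cannot be embedded in code; compare
        // the shared info so any closure over it also matches.
        return in_cell->shared == expected->shared;
      }
      return in_cell == expected;  // old-space address is safe to embed
    }

    int main() {
      SharedFunctionInfo shared;
      JSFunction f1{&shared};
      JSFunction f2{&shared};
      assert(CellStillHoldsExpectedFunction(&f1, &f2, true));
      assert(!CellStillHoldsExpectedFunction(&f1, &f2, false));
    }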
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
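
This is the recurring conversion in the rest of the file: MaybeObject*
returns with explicit failure propagation become Handle<Code>, so allocation
failure handling moves into the allocator and the only out-of-band value
left is a null handle, which the custom call generators use to mean "bail
out to the regular compiler". A minimal sketch of that protocol with a mock
handle type (not v8's):

    #include <iostream>
    #include <memory>
    #include <string>

    struct Code { std::string body; };

    struct HandleCode {
      std::shared_ptr<Code> ptr;
      bool is_null() const { return ptr == nullptr; }
      static HandleCode null() { return HandleCode{nullptr}; }
    };

    HandleCode CompileCustomCall(bool applicable) {
      if (!applicable) return HandleCode::null();  // caller falls back
      return HandleCode{std::make_shared<Code>(Code{"custom stub"})};
    }

    int main() {
      HandleCode code = CompileCustomCall(false);
      std::cout << (code.is_null() ? "regular compiler" : code.ptr->body)
                << "\n";
    }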
 
 
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1489,23 +1401,23 @@
   Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1515,14 +1427,12 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   Register receiver = r1;
-
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1531,8 +1441,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object), receiver,
-                  holder, r3, r0, r4, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
+                  name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1541,10 +1451,8 @@
     __ Ret();
   } else {
     Label call_builtin;
-
     Register elements = r3;
     Register end_elements = r5;
-
     // Get the elements array of the object.
     __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
@@ -1556,7 +1464,7 @@
                 DONT_DO_SMI_CHECK);
 
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements;
 
       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1571,11 +1479,15 @@
       __ cmp(r0, r4);
       __ b(gt, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      Label with_write_barrier;
+      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ JumpIfNotSmi(r4, &with_write_barrier);
+
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
       // Push the element.
-      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
@@ -1585,14 +1497,31 @@
       __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
-      // Check for a smi.
-      __ JumpIfNotSmi(r4, &with_write_barrier);
-      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-      __ InNewSpace(elements, r4, eq, &exit);
-      __ RecordWriteHelper(elements, end_elements, r4);
+
+      __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(r6, r6, &call_builtin);
+
+      // Save new length.
+      __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Push the element.
+      // We may need a register containing the address end_elements below,
+      // so write back the value in end_elements.
+      __ add(end_elements, elements,
+             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+      __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+      __ RecordWrite(elements,
+                     end_elements,
+                     r4,
+                     kLRHasNotBeenSaved,
+                     kDontSaveFPRegs,
+                     EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
       __ Drop(argc + 1);
       __ Ret();
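
The InNewSpace/RecordWriteHelper pair is replaced by the incremental-marking
write barrier, with the smi check hoisted to the top of the fast path so only
heap-object stores pay for the barrier (which is why OMIT_SMI_CHECK is safe
here). A rough model of the store path, assuming v8's smi tagging (low bit
clear):

    #include <cstdint>
    #include <unordered_set>

    using Slot = std::uintptr_t*;
    std::unordered_set<Slot> remembered_set;

    inline bool IsSmi(std::uintptr_t value) { return (value & 1) == 0; }

    void StoreWithBarrier(Slot slot, std::uintptr_t value) {
      *slot = value;
      if (IsSmi(value)) return;      // smis hold no pointer: skip barrier
      remembered_set.insert(slot);   // EMIT_REMEMBERED_SET analogue
    }

    int main() {
      std::uintptr_t backing[1] = {0};
      StoreWithBarrier(&backing[0], 4 << 1);       // smi 4: not recorded
      return static_cast<int>(remembered_set.size());
    }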
 
@@ -1604,6 +1533,15 @@
         __ b(&call_builtin);
       }
 
+      __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(r2, &no_fast_elements_check);
+      __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(r7, r7, &call_builtin);
+      __ bind(&no_fast_elements_check);
+
       Isolate* isolate = masm()->isolate();
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate);
@@ -1630,8 +1568,7 @@
       // Update new_space_allocation_top.
       __ str(r6, MemOperand(r7));
       // Push the argument.
-      __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ str(r6, MemOperand(end_elements));
+      __ str(r2, MemOperand(end_elements));
       // Fill the rest with holes.
       __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
@@ -1656,19 +1593,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
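
The second new guard in this function keeps the inline growth path away from
smi-only element storage when the pushed value is not a smi, since that would
require an elements-kind transition the stub does not perform. A condensed
sketch of the decision (kind names abbreviated from v8's):

    enum class ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    // True when the stub may grow the backing store inline; otherwise
    // the generated code jumps to call_builtin.
    bool CanGrowInline(ElementsKind kind, bool new_element_is_smi) {
      if (new_element_is_smi) return true;  // a smi fits either kind
      return kind == ElementsKind::FAST_ELEMENTS;
    }

    int main() {
      return CanGrowInline(ElementsKind::FAST_SMI_ONLY_ELEMENTS, false)
                 ? 1   // would be unsound
                 : 0;  // delegate to the builtin
    }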
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1678,25 +1615,22 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   Register receiver = r1;
   Register elements = r3;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object),
-                  receiver, holder, elements, r4, r0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+                  r4, r0, name, &miss);
 
   // Get the elements array of the object.
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1745,20 +1679,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1768,21 +1701,19 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1790,13 +1721,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
-                  r1, r3, r4, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  r0, holder, r1, r3, r4, name, &miss);
 
   Register receiver = r1;
   Register index = r4;
-  Register scratch = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1805,20 +1735,19 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1829,22 +1758,21 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, Handle<String>(name));
+  __ Move(r2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1854,21 +1782,18 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1876,14 +1801,13 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
-                  r1, r3, r4, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  r0, holder, r1, r3, r4, name, &miss);
 
   Register receiver = r0;
   Register index = r4;
-  Register scratch1 = r1;
-  Register scratch2 = r3;
+  Register scratch = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1892,21 +1816,20 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1917,22 +1840,21 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, Handle<String>(name));
+  __ Move(r2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1945,22 +1867,23 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
 
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1976,13 +1899,13 @@
   // Convert the smi code to uint16.
   __ and_(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, r0);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, r0);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -1991,19 +1914,19 @@
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2013,31 +1936,28 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(VFP3)) {
-      return heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   CpuFeatures::Scope scope_vfp3(VFP3);
-
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2139,19 +2059,19 @@
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2161,25 +2081,22 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
-
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2240,35 +2157,33 @@
 
   __ bind(&miss);
   // r2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
-
   GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
@@ -2284,44 +2199,40 @@
   ReserveSpaceForFastApiCall(masm(), r0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
                   depth, &miss);
 
-  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiDirectCall(masm(), optimization, argc);
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
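
A hypothetical condensation of the eligibility checks at the top of this
function (struct and field names invented for illustration): the fast API
path is compiled only for a plain, non-global JSObject receiver, a cell-less
call site, and an expected-type prototype found at a valid depth:

    struct CallSite {
      bool receiver_is_global;
      bool receiver_is_js_object;
      bool has_cell;
      int proto_depth;  // kInvalidProtoDepth when not found
    };

    const int kInvalidProtoDepth = -1;  // stand-in value

    bool FastApiEligible(const CallSite& s) {
      return !s.receiver_is_global && s.receiver_is_js_object &&
             !s.has_cell && s.proto_depth != kInvalidProtoDepth;
    }

    int main() {
      CallSite site{false, true, false, 1};
      return FastApiEligible(site) ? 0 : 1;
    }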
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
@@ -2336,16 +2247,14 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
                           1, r0, r3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
-                      &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
+                      name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2356,28 +2265,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
         __ b(ge, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(r1, &fast);
@@ -2387,18 +2293,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2411,112 +2317,91 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
-                        r1, r4, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            r0, holder, r3, r1, r4, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
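
The rewritten checks flip the old branches around: builtins and non-classic
(strict or extended mode) functions may be entered with a primitive receiver
directly, while classic-mode functions would observe a boxed receiver, so
those call sites take the miss. A sketch of the guard with illustrative
field names:

    struct FunctionInfo {
      bool is_builtin;
      bool is_classic_mode;  // neither strict nor extended mode
    };

    bool PrimitiveReceiverOk(const FunctionInfo& f) {
      return f.is_builtin || !f.is_classic_mode;
    }

    int main() {
      FunctionInfo classic{false, true};
      return PrimitiveReceiverOk(classic) ? 1 : 0;  // 0: miss and box
    }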
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         r1,
-                                         r3,
-                                         r4,
-                                         r0,
-                                         &miss);
-  if (result->IsFailure()) {
-      return result;
-  }
+  CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
+                   &miss);
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -2532,39 +2417,31 @@
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
-  ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  } else {
-    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
-                  JUMP_FUNCTION, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2573,24 +2450,20 @@
   // -----------------------------------
   Label miss;
 
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     r1, r2, r3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
   __ bind(&miss);
   Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2617,7 +2490,7 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   __ push(r1);  // receiver
-  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
+  __ mov(ip, Operand(callback));  // callback info
   __ Push(ip, r2, r0);
 
   // Do tail-call to the runtime system.
@@ -2636,8 +2509,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2684,9 +2558,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2704,7 +2579,7 @@
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r4, Operand(cell));
   __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
   __ cmp(r5, r6);
@@ -2712,6 +2587,7 @@
 
   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
 
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2728,9 +2604,9 @@
 }
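
A sketch of why the store above can skip the barrier, assuming the model the
new comment states: cells live in a space the collector rescans wholesale,
so per-slot remembered-set entries would be redundant (space names are
illustrative):

    #include <unordered_set>

    enum class Space { NEW_SPACE, OLD_POINTER_SPACE, CELL_SPACE };
    std::unordered_set<void**> remembered_set;

    void RecordStore(Space host_space, void** slot, void* value) {
      *slot = value;
      if (host_space == Space::CELL_SPACE) return;  // rescanned anyway
      remembered_set.insert(slot);
    }

    int main() {
      void* cell_value = nullptr;
      RecordStore(Space::CELL_SPACE, &cell_value, &cell_value);
      return static_cast<int>(remembered_set.size());  // 0
    }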
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- lr    : return address
@@ -2746,15 +2622,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  r1,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2766,14 +2635,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
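
A sketch of the negative-lookup guard above (types are illustrative): a stub
that caches "property is absent" stays valid only while the global object's
cell for that name still holds the hole:

    #include <map>
    #include <string>

    struct Cell { bool holds_the_hole; };
    std::map<std::string, Cell> global_cells;

    bool NonexistentStubStillValid(const std::string& name) {
      auto it = global_cells.find(name);
      return it == global_cells.end() || it->second.holds_the_hole;
    }

    int main() {
      global_cells["x"] = Cell{false};  // property was (re)introduced
      return NonexistentStubStillValid("x") ? 1 : 0;  // 0: must miss
    }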
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2790,24 +2659,19 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
-                                             callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2816,10 +2680,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<Object> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2836,9 +2700,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2846,17 +2710,9 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object,
-                          holder,
-                          &lookup,
-                          r0,
-                          r2,
-                          r3,
-                          r1,
-                          r4,
-                          name,
+  GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2866,11 +2722,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2881,7 +2738,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(r0, &miss);
   }
 
@@ -2889,7 +2746,7 @@
   CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
 
   // Get the value from the cell.
-  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(r3, Operand(cell));
   __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2913,9 +2770,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
@@ -2925,7 +2782,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
@@ -2936,11 +2793,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2949,16 +2806,11 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
-                                             r4, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2966,10 +2818,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<Object> value) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2978,7 +2831,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
@@ -2990,9 +2843,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3001,20 +2855,12 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          r1,
-                          r0,
-                          r2,
-                          r3,
-                          r4,
-                          name,
+  GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3023,7 +2869,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3032,7 +2879,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadArrayLength(masm(), r1, r2, &miss);
@@ -3043,7 +2890,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3055,7 +2903,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
@@ -3068,7 +2916,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3080,7 +2929,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
 
   // Check the name hasn't changed.
-  __ cmp(r0, Operand(Handle<String>(name)));
+  __ cmp(r0, Operand(name));
   __ b(ne, &miss);
 
   GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
@@ -3092,33 +2941,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
   //  -- r1    : receiver
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(r1,
-                 r2,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3130,11 +2975,9 @@
   int receiver_count = receiver_maps->length();
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ mov(ip, Operand(map));
+    __ mov(ip, Operand(receiver_maps->at(current)));
     __ cmp(r2, ip);
-    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
   }
 
   __ bind(&miss);
@@ -3142,14 +2985,14 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : name
@@ -3162,17 +3005,12 @@
   __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
 
   // Check that the name has not changed.
-  __ cmp(r1, Operand(Handle<String>(name)));
+  __ cmp(r1, Operand(name));
   __ b(ne, &miss);
 
   // r3 is used as scratch register. r1 and r2 keep their values if a jump to
   // the miss label is generated.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     r2, r1, r3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
   __ bind(&miss);
 
   __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3180,11 +3018,12 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -3192,29 +3031,25 @@
   //  -- lr    : return address
   //  -- r3    : scratch
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(r2,
-                 r3,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+  __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -3227,12 +3062,18 @@
 
   int receiver_count = receiver_maps->length();
   __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ mov(ip, Operand(map));
+  for (int i = 0; i < receiver_count; ++i) {
+    __ mov(ip, Operand(receiver_maps->at(i)));
     __ cmp(r3, ip);
-    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+    } else {
+      Label next_map;
+      __ b(ne, &next_map);
+      __ mov(r3, Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -3240,11 +3081,12 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
   //  -- r1    : constructor
@@ -3290,12 +3132,7 @@
   // r2: initial map
   // r7: undefined
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(r3,
-                        r4,
-                        r5,
-                        r6,
-                        &generic_stub_call,
-                        SIZE_IN_WORDS);
+  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
 
   // Allocated the JSObject; now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3327,7 +3164,7 @@
   // r7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -3454,6 +3291,7 @@
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3540,6 +3378,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3784,9 +3623,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -3880,6 +3719,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3943,6 +3783,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4082,6 +3923,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4157,9 +3999,9 @@
   __ Ret();
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4234,8 +4076,10 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4244,7 +4088,7 @@
   //  -- r3    : scratch
   //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind;
 
   Register value_reg = r0;
   Register key_reg = r1;
@@ -4277,15 +4121,33 @@
   __ cmp(key_reg, scratch);
   __ b(hs, &miss_force_generic);
 
-  __ add(scratch,
-         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ str(value_reg,
-         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ RecordWrite(scratch,
-                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
-                 receiver_reg , elements_reg);
-
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+    __ add(scratch,
+           elements_reg,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ add(scratch,
+           scratch,
+           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ str(value_reg, MemOperand(scratch));
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ add(scratch,
+           elements_reg,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ add(scratch,
+           scratch,
+           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ str(value_reg, MemOperand(scratch));
+    __ mov(receiver_reg, value_reg);
+    __ RecordWrite(elements_reg,  // Object.
+                   scratch,       // Address.
+                   receiver_reg,  // Value.
+                   kLRHasNotBeenSaved,
+                   kDontSaveFPRegs);
+  }
   // value_reg (r0) is preserved.
   // Done.
   __ Ret();
@@ -4294,6 +4156,10 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4309,15 +4175,15 @@
   //  -- r4    : scratch
   //  -- r5    : scratch
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+  Label miss_force_generic, transition_elements_kind;
 
   Register value_reg = r0;
   Register key_reg = r1;
   Register receiver_reg = r2;
-  Register scratch = r3;
-  Register elements_reg = r4;
-  Register mantissa_reg = r5;
-  Register exponent_reg = r6;
+  Register elements_reg = r3;
+  Register scratch1 = r4;
+  Register scratch2 = r5;
+  Register scratch3 = r6;
   Register scratch4 = r7;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // be in r2.
@@ -4329,90 +4195,25 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ ldr(scratch,
+    __ ldr(scratch1,
            FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis; unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ cmp(key_reg, scratch);
+  __ cmp(key_reg, scratch1);
   __ b(hs, &miss_force_generic);
 
-  // Handle smi values specially.
-  __ JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  __ CheckMap(value_reg,
-              scratch,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
-  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  __ cmp(exponent_reg, scratch);
-  __ b(ge, &maybe_nan);
-
-  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  __ bind(&have_double_value);
-  __ add(scratch, elements_reg,
-         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ str(exponent_reg, FieldMemOperand(scratch, offset));
-  __ Ret();
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ b(gt, &is_nan);
-  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  __ cmp(mantissa_reg, Operand(0));
-  __ b(eq, &have_double_value);
-  __ bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  uint64_t nan_int64 = BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-  __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
-  __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
-  __ jmp(&have_double_value);
-
-  __ bind(&smi_value);
-  __ add(scratch, elements_reg,
-         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ add(scratch, scratch,
-         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  // scratch is now effective address of the double element
-
-  FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(VFP3)) {
-    destination = FloatingPointHelper::kVFPRegisters;
-  } else {
-    destination = FloatingPointHelper::kCoreRegisters;
-  }
-
-  Register untagged_value = receiver_reg;
-  __ SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(
-      masm,
-      untagged_value,
-      destination,
-      d0,
-      mantissa_reg,
-      exponent_reg,
-      scratch4,
-      s2);
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vstr(d0, scratch, 0);
-  } else {
-    __ str(mantissa_reg, MemOperand(scratch, 0));
-    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
-  }
+  __ StoreNumberToDoubleElements(value_reg,
+                                 key_reg,
+                                 receiver_reg,
+                                 elements_reg,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3,
+                                 scratch4,
+                                 &transition_elements_kind);
   __ Ret();
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -4420,6 +4221,10 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
 }
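
The FAST_SMI_ONLY_ELEMENTS cases and the new transition_elements_kind
exits above make each store stub handle only values that fit the
receiver's current elements kind; anything else jumps to
KeyedStoreIC_Miss so the runtime can transition the backing store and
re-specialize the stub. A sketch of the script-level stores these stubs
specialize (the elements kinds themselves are internal and not
observable from JavaScript):

    var a = [1, 2, 3];   // starts out with smi-only elements
    a[0] = 7;            // still a smi: handled by the smi-only store stub
    a[1] = 1.5;          // not a smi: the stub misses and the array is
                         // transitioned to double elements
    a[2] = {};           // not a number: transitioned again to tagged
                         // (FAST_ELEMENTS) storage, with a write barrier
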
 
 
diff --git a/src/array.js b/src/array.js
index 98fe3ac..3d8e278 100644
--- a/src/array.js
+++ b/src/array.js
@@ -201,17 +201,14 @@
 
 
 function ConvertToLocaleString(e) {
-  if (e == null) {
+  if (IS_NULL_OR_UNDEFINED(e)) {
     return '';
   } else {
-    // e_obj's toLocaleString might be overwritten, check if it is a function.
-    // Call ToString if toLocaleString is not a function.
-    // See issue 877615.
+    // According to ES5, section 15.4.4.3, the toLocaleString conversion
+    // must throw a TypeError if ToObject(e).toLocaleString isn't
+    // callable.
     var e_obj = ToObject(e);
-    if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
-      return ToString(e_obj.toLocaleString());
-    else
-      return ToString(e);
+    return %ToString(e_obj.toLocaleString());
   }
 }
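
ConvertToLocaleString now follows ES5 section 15.4.4.3 exactly: null
and undefined elements still become the empty string, but an element
whose toLocaleString is not callable raises a TypeError instead of
silently falling back to ToString. Expected behavior under the new
code:

    [null, undefined, 1].toLocaleString();       // ",,1"
    [{ toLocaleString: 42 }].toLocaleString();   // throws TypeError
    // (the old code returned ToString of the element instead)
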
 
@@ -331,8 +328,9 @@
     // would be the appropriate test.  We follow KJS in consulting the
     // prototype.
     var current = array[index];
-    if (!IS_UNDEFINED(current) || index in array)
+    if (!IS_UNDEFINED(current) || index in array) {
       deleted_elements[i] = current;
+    }
   }
 }
 
@@ -381,18 +379,31 @@
 
 
 function ArrayToString() {
-  if (!IS_ARRAY(this)) {
-    throw new $TypeError('Array.prototype.toString is not generic');
+  var array;
+  var func;
+  if (IS_ARRAY(this)) {
+    func = this.join;
+    if (func === ArrayJoin) {
+      return Join(this, this.length, ',', ConvertToString);
+    }
+    array = this;
+  } else {
+    array = ToObject(this);
+    func = array.join;
   }
-  return Join(this, this.length, ',', ConvertToString);
+  if (!IS_SPEC_FUNCTION(func)) {
+    return %_CallFunction(array, ObjectToString);
+  }
+  return %_CallFunction(array, func);
 }
 
 
 function ArrayToLocaleString() {
-  if (!IS_ARRAY(this)) {
-    throw new $TypeError('Array.prototype.toString is not generic');
-  }
-  return Join(this, this.length, ',', ConvertToLocaleString);
+  var array = ToObject(this);
+  var arrayLen = array.length;
+  var len = TO_UINT32(arrayLen);
+  if (len === 0) return "";
+  return Join(array, len, ',', ConvertToLocaleString);
 }
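
Array.prototype.toString and toLocaleString are now generic, as ES5
sections 15.4.4.2 and 15.4.4.3 require: the receiver goes through
ToObject, toString dispatches to a callable join property (falling back
to Object.prototype.toString), and toLocaleString accepts any
array-like. A few calls that previously threw "is not generic":

    Array.prototype.toLocaleString.call({ 0: "a", 1: "b", length: 2 });
    // "a,b"

    Array.prototype.toString.call({ join: function() { return "joined"; } });
    // "joined" -- the callable join is used

    Array.prototype.toString.call({ length: 0 });
    // "[object Object]" -- no callable join, falls back to ObjectToString
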
 
 
@@ -485,12 +496,12 @@
 
     if (j_complement <= i) {
       high = j;
-      while (keys[--high_counter] == j);
+      while (keys[--high_counter] == j) { }
       low = j_complement;
     }
     if (j_complement >= i) {
       low = i;
-      while (keys[++low_counter] == i);
+      while (keys[++low_counter] == i) { }
       high = len - i - 1;
     }
 
@@ -566,10 +577,11 @@
 
   var first = this[0];
 
-  if (IS_ARRAY(this))
+  if (IS_ARRAY(this)) {
     SmartMove(this, 0, 1, len, 0);
-  else
+  } else {
     SimpleMove(this, 0, 1, len, 0);
+  }
 
   this.length = len - 1;
 
@@ -586,10 +598,11 @@
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
-  if (IS_ARRAY(this))
+  if (IS_ARRAY(this)) {
     SmartMove(this, 0, 0, len, num_arguments);
-  else
+  } else {
     SimpleMove(this, 0, 0, len, num_arguments);
+  }
 
   for (var i = 0; i < num_arguments; i++) {
     this[i] = %_Arguments(i);
@@ -993,25 +1006,32 @@
                         ["Array.prototype.filter"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping, and side effects are visible.
+  var array = ToObject(this);
+  var length = ToUint32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = ToUint32(this.length);
-  var result = [];
-  var result_length = 0;
+
+  var result = new $Array();
+  var accumulator = new InternalArray();
+  var accumulator_length = 0;
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (%_CallFunction(receiver, current, i, this, f)) {
-        result[result_length++] = current;
+    var current = array[i];
+    if (!IS_UNDEFINED(current) || i in array) {
+      if (%_CallFunction(receiver, current, i, array, f)) {
+        accumulator[accumulator_length++] = current;
       }
     }
   }
+  %MoveArrayContents(accumulator, result);
   return result;
 }
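
This filter rewrite establishes the pattern repeated for forEach, some,
every, and map below: the receiver is converted with ToObject and its
length is read before the callback is validated (so those side effects
occur in spec order), a primitive thisArg is wrapped in an object, and
results accumulate in an InternalArray that is only moved into the
user-visible array at the end, presumably so accessors on
Array.prototype cannot intercept the intermediate writes. A sketch of
the expected behavior:

    var kind;
    [1].filter(function() { kind = typeof this; return true; }, "ctx");
    kind;   // "object" -- the primitive receiver "ctx" was wrapped

    try {
      Array.prototype.filter.call({ get length() { throw "length first"; } },
                                  null);
    } catch (e) {
      // e === "length first": length is read before the callable check
    }
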
 
@@ -1022,19 +1042,24 @@
                         ["Array.prototype.forEach"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping, and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length =  TO_UINT32(this.length);
+
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      %_CallFunction(receiver, current, i, this, f);
+    var current = array[i];
+    if (!IS_UNDEFINED(current) || i in array) {
+      %_CallFunction(receiver, current, i, array, f);
     }
   }
 }
@@ -1048,19 +1073,24 @@
                         ["Array.prototype.some"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping, and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = TO_UINT32(this.length);
+
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (%_CallFunction(receiver, current, i, this, f)) return true;
+    var current = array[i];
+    if (!IS_UNDEFINED(current) || i in array) {
+      if (%_CallFunction(receiver, current, i, array, f)) return true;
     }
   }
   return false;
@@ -1073,19 +1103,24 @@
                         ["Array.prototype.every"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping, and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = TO_UINT32(this.length);
+
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (!%_CallFunction(receiver, current, i, this, f)) return false;
+    var current = array[i];
+    if (!IS_UNDEFINED(current) || i in array) {
+      if (!%_CallFunction(receiver, current, i, array, f)) return false;
     }
   }
   return true;
@@ -1097,21 +1132,26 @@
                         ["Array.prototype.map"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping, and side effects are visible.
+  var array = ToObject(this);
+  var length = TO_UINT32(array.length);
+
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
+  } else if (!IS_SPEC_OBJECT(receiver)) {
+    receiver = ToObject(receiver);
   }
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = TO_UINT32(this.length);
+
   var result = new $Array();
   var accumulator = new InternalArray(length);
   for (var i = 0; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      accumulator[i] = %_CallFunction(receiver, current, i, this, f);
+    var current = array[i];
+    if (!IS_UNDEFINED(current) || i in array) {
+      accumulator[i] = %_CallFunction(receiver, current, i, array, f);
     }
   }
   %MoveArrayContents(accumulator, result);
@@ -1245,19 +1285,20 @@
                         ["Array.prototype.reduce"]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping, and side effects are visible.
+  var array = ToObject(this);
+  var length = ToUint32(array.length);
+
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping.
-  var length = ToUint32(this.length);
   var i = 0;
-
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i < length; i++) {
-      current = this[i];
-      if (!IS_UNDEFINED(current) || i in this) {
+      current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
         i++;
         break find_initial;
       }
@@ -1267,9 +1308,9 @@
 
   var receiver = %GetDefaultReceiver(callback);
   for (; i < length; i++) {
-    var element = this[i];
-    if (!IS_UNDEFINED(element) || i in this) {
-      current = %_CallFunction(receiver, current, element, i, this, callback);
+    var element = array[i];
+    if (!IS_UNDEFINED(element) || i in array) {
+      current = %_CallFunction(receiver, current, element, i, array, callback);
     }
   }
   return current;
@@ -1281,15 +1322,20 @@
                         ["Array.prototype.reduceRight"]);
   }
 
+  // Pull out the length so that side effects are visible before the
+  // callback function is checked.
+  var array = ToObject(this);
+  var length = ToUint32(array.length);
+
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
-  var i = ToUint32(this.length) - 1;
 
+  var i = length - 1;
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i >= 0; i--) {
-      current = this[i];
-      if (!IS_UNDEFINED(current) || i in this) {
+      current = array[i];
+      if (!IS_UNDEFINED(current) || i in array) {
         i--;
         break find_initial;
       }
@@ -1299,9 +1345,9 @@
 
   var receiver = %GetDefaultReceiver(callback);
   for (; i >= 0; i--) {
-    var element = this[i];
-    if (!IS_UNDEFINED(element) || i in this) {
-      current = %_CallFunction(receiver, current, element, i, this, callback);
+    var element = array[i];
+    if (!IS_UNDEFINED(element) || i in array) {
+      current = %_CallFunction(receiver, current, element, i, array, callback);
     }
   }
   return current;
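
reduce and reduceRight get the same ToObject/length-first treatment,
which also makes them usable on array-likes and primitive receivers. A
sketch:

    Array.prototype.reduce.call(
        { 0: 1, 1: 2, 2: 3, length: 3 },
        function(acc, x) { return acc + x; });        // 6

    Array.prototype.reduceRight.call(
        "abc", function(acc, c) { return acc + c; });  // "cba"
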
@@ -1342,7 +1388,7 @@
   // set their names.
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
-  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
+  InstallFunctions($Array.prototype, DONT_ENUM, $Array(
     "toString", getFunction("toString", ArrayToString),
     "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
     "join", getFunction("join", ArrayJoin),
diff --git a/src/assembler.cc b/src/assembler.cc
index ad5f350..fd8c75e 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -38,6 +38,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "ic-inl.h"
+#include "incremental-marking.h"
 #include "factory.h"
 #include "runtime.h"
 #include "runtime-profiler.h"
@@ -47,6 +48,7 @@
 #include "ast.h"
 #include "regexp-macro-assembler.h"
 #include "platform.h"
+#include "store-buffer.h"
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
@@ -516,6 +518,7 @@
 
 
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
+  rinfo_.host_ = code;
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
   // Relocation info is read backwards.
@@ -736,9 +739,38 @@
   : address_(table_ref.address()) {}
 
 
+ExternalReference ExternalReference::
+    incremental_marking_record_write_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+    incremental_evacuation_record_write_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+    store_buffer_overflow_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
+}
+
+
+ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+}
+
+
 ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(Runtime::PerformGC)));
+  return
+      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
 }
 
 
@@ -785,11 +817,6 @@
 }
 
 
-ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->global_contexts_list_address());
-}
-
-
 ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
   return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
 }
@@ -802,19 +829,8 @@
 }
 
 
-ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
-  return ExternalReference(isolate->factory()->the_hole_value().location());
-}
-
-
-ExternalReference ExternalReference::arguments_marker_location(
-    Isolate* isolate) {
-  return ExternalReference(isolate->factory()->arguments_marker().location());
-}
-
-
-ExternalReference ExternalReference::roots_address(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->roots_address());
+ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->roots_array_start());
 }
 
 
@@ -840,9 +856,14 @@
 }
 
 
+ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
+}
+
+
 ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
-  Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
-  return ExternalReference(mask);
+  return ExternalReference(reinterpret_cast<Address>(
+      isolate->heap()->NewSpaceMask()));
 }
 
 
@@ -1025,6 +1046,11 @@
 }
 
 
+static double math_tan_double(double x) {
+  return tan(x);
+}
+
+
 static double math_log_double(double x) {
   return log(x);
 }
@@ -1046,6 +1072,14 @@
 }
 
 
+ExternalReference ExternalReference::math_tan_double_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate,
+                                    FUNCTION_ADDR(math_tan_double),
+                                    BUILTIN_FP_CALL));
+}
+
+
 ExternalReference ExternalReference::math_log_double_function(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
@@ -1111,6 +1145,23 @@
 }
 
 
+bool EvalComparison(Token::Value op, double op1, double op2) {
+  ASSERT(Token::IsCompareOp(op));
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT: return (op1 == op2);
+    case Token::NE: return (op1 != op2);
+    case Token::LT: return (op1 < op2);
+    case Token::GT: return (op1 > op2);
+    case Token::LTE: return (op1 <= op2);
+    case Token::GTE: return (op1 >= op2);
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
 ExternalReference ExternalReference::double_fp_operation(
     Token::Value operation, Isolate* isolate) {
   typedef double BinaryFPOperation(double x, double y);
diff --git a/src/assembler.h b/src/assembler.h
index d58034d..cec20fc 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -143,6 +143,9 @@
 };
 
 
+enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+
+
 // -----------------------------------------------------------------------------
 // Relocation information
 
@@ -216,8 +219,9 @@
 
 
   RelocInfo() {}
-  RelocInfo(byte* pc, Mode rmode, intptr_t data)
-      : pc_(pc), rmode_(rmode), data_(data) {
+
+  RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
+      : pc_(pc), rmode_(rmode), data_(data), host_(host) {
   }
 
   static inline bool IsConstructCall(Mode mode) {
@@ -226,6 +230,9 @@
   static inline bool IsCodeTarget(Mode mode) {
     return mode <= LAST_CODE_ENUM;
   }
+  static inline bool IsEmbeddedObject(Mode mode) {
+    return mode == EMBEDDED_OBJECT;
+  }
   // Is the relocation mode affected by GC?
   static inline bool IsGCRelocMode(Mode mode) {
     return mode <= LAST_GCED_ENUM;
@@ -258,6 +265,7 @@
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
+  Code* host() const { return host_; }
 
   // Apply a relocation by delta bytes
   INLINE(void apply(intptr_t delta));
@@ -271,14 +279,17 @@
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
   INLINE(Address target_address());
-  INLINE(void set_target_address(Address target));
+  INLINE(void set_target_address(Address target,
+                                 WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
   INLINE(Object* target_object());
   INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
-  INLINE(void set_target_object(Object* target));
+  INLINE(void set_target_object(Object* target,
+                                WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
   INLINE(JSGlobalPropertyCell* target_cell());
   INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
-  INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
+  INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
+                              WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
 
 
   // Read the address of the word containing the target_address in an
@@ -353,6 +364,7 @@
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
+  Code* host_;
 #ifdef V8_TARGET_ARCH_MIPS
   // Code and Embedded Object pointers in mips are stored split
   // across two consecutive 32-bit instructions. Heap management
@@ -561,6 +573,13 @@
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
+  static ExternalReference incremental_marking_record_write_function(
+      Isolate* isolate);
+  static ExternalReference incremental_evacuation_record_write_function(
+      Isolate* isolate);
+  static ExternalReference store_buffer_overflow_function(
+      Isolate* isolate);
+  static ExternalReference flush_icache_function(Isolate* isolate);
   static ExternalReference perform_gc_function(Isolate* isolate);
   static ExternalReference fill_heap_number_with_random_function(
       Isolate* isolate);
@@ -571,20 +590,13 @@
   // Deoptimization support.
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
-  static ExternalReference global_contexts_list(Isolate* isolate);
 
   // Static data in the keyed lookup cache.
   static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
   static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
 
-  // Static variable Factory::the_hole_value.location()
-  static ExternalReference the_hole_value_location(Isolate* isolate);
-
-  // Static variable Factory::arguments_marker.location()
-  static ExternalReference arguments_marker_location(Isolate* isolate);
-
-  // Static variable Heap::roots_address()
-  static ExternalReference roots_address(Isolate* isolate);
+  // Static variable Heap::roots_array_start()
+  static ExternalReference roots_array_start(Isolate* isolate);
 
   // Static variable StackGuard::address_of_jslimit()
   static ExternalReference address_of_stack_limit(Isolate* isolate);
@@ -606,6 +618,10 @@
   static ExternalReference new_space_start(Isolate* isolate);
   static ExternalReference new_space_mask(Isolate* isolate);
   static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
+  static ExternalReference new_space_mark_bits(Isolate* isolate);
+
+  // Write barrier.
+  static ExternalReference store_buffer_top(Isolate* isolate);
 
   // Used for fast allocation in generated code.
   static ExternalReference new_space_allocation_top_address(Isolate* isolate);
@@ -635,6 +651,7 @@
 
   static ExternalReference math_sin_double_function(Isolate* isolate);
   static ExternalReference math_cos_double_function(Isolate* isolate);
+  static ExternalReference math_tan_double_function(Isolate* isolate);
   static ExternalReference math_log_double_function(Isolate* isolate);
 
   Address address() const {return reinterpret_cast<Address>(address_);}
@@ -799,33 +816,33 @@
 // -----------------------------------------------------------------------------
 // Utility functions
 
-static inline bool is_intn(int x, int n)  {
+inline bool is_intn(int x, int n)  {
   return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
 
-static inline bool is_int8(int x)  { return is_intn(x, 8); }
-static inline bool is_int16(int x)  { return is_intn(x, 16); }
-static inline bool is_int18(int x)  { return is_intn(x, 18); }
-static inline bool is_int24(int x)  { return is_intn(x, 24); }
+inline bool is_int8(int x)  { return is_intn(x, 8); }
+inline bool is_int16(int x)  { return is_intn(x, 16); }
+inline bool is_int18(int x)  { return is_intn(x, 18); }
+inline bool is_int24(int x)  { return is_intn(x, 24); }
 
-static inline bool is_uintn(int x, int n) {
+inline bool is_uintn(int x, int n) {
   return (x & -(1 << n)) == 0;
 }
 
-static inline bool is_uint2(int x)  { return is_uintn(x, 2); }
-static inline bool is_uint3(int x)  { return is_uintn(x, 3); }
-static inline bool is_uint4(int x)  { return is_uintn(x, 4); }
-static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
-static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
-static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
-static inline bool is_uint10(int x)  { return is_uintn(x, 10); }
-static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
-static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
-static inline bool is_uint24(int x)  { return is_uintn(x, 24); }
-static inline bool is_uint26(int x)  { return is_uintn(x, 26); }
-static inline bool is_uint28(int x)  { return is_uintn(x, 28); }
+inline bool is_uint2(int x)  { return is_uintn(x, 2); }
+inline bool is_uint3(int x)  { return is_uintn(x, 3); }
+inline bool is_uint4(int x)  { return is_uintn(x, 4); }
+inline bool is_uint5(int x)  { return is_uintn(x, 5); }
+inline bool is_uint6(int x)  { return is_uintn(x, 6); }
+inline bool is_uint8(int x)  { return is_uintn(x, 8); }
+inline bool is_uint10(int x)  { return is_uintn(x, 10); }
+inline bool is_uint12(int x)  { return is_uintn(x, 12); }
+inline bool is_uint16(int x)  { return is_uintn(x, 16); }
+inline bool is_uint24(int x)  { return is_uintn(x, 24); }
+inline bool is_uint26(int x)  { return is_uintn(x, 26); }
+inline bool is_uint28(int x)  { return is_uintn(x, 28); }
 
-static inline int NumberOfBitsSet(uint32_t x) {
+inline int NumberOfBitsSet(uint32_t x) {
   unsigned int num_bits_set;
   for (num_bits_set = 0; x; x >>= 1) {
     num_bits_set += x & 1;
@@ -833,6 +850,8 @@
   return num_bits_set;
 }
 
+bool EvalComparison(Token::Value op, double op1, double op2);
+
 // Computes pow(x, y) with the special cases in the spec for Math.pow.
 double power_double_int(double x, int y);
 double power_double_double(double x, double y);
diff --git a/src/ast-inl.h b/src/ast-inl.h
deleted file mode 100644
index 731ad2f..0000000
--- a/src/ast-inl.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_INL_H_
-#define V8_AST_INL_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-SwitchStatement::SwitchStatement(Isolate* isolate,
-                                 ZoneStringList* labels)
-    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
-      tag_(NULL), cases_(NULL) {
-}
-
-
-Block::Block(Isolate* isolate,
-             ZoneStringList* labels,
-             int capacity,
-             bool is_initializer_block)
-    : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
-      statements_(capacity),
-      is_initializer_block_(is_initializer_block),
-      block_scope_(NULL) {
-}
-
-
-BreakableStatement::BreakableStatement(Isolate* isolate,
-                                       ZoneStringList* labels,
-                                       Type type)
-    : labels_(labels),
-      type_(type),
-      entry_id_(GetNextId(isolate)),
-      exit_id_(GetNextId(isolate)) {
-  ASSERT(labels == NULL || labels->length() > 0);
-}
-
-
-IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
-    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
-      body_(NULL),
-      continue_target_(),
-      osr_entry_id_(GetNextId(isolate)) {
-}
-
-
-DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      cond_(NULL),
-      condition_position_(-1),
-      continue_id_(GetNextId(isolate)),
-      back_edge_id_(GetNextId(isolate)) {
-}
-
-
-WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      cond_(NULL),
-      may_have_function_literal_(true),
-      body_id_(GetNextId(isolate)) {
-}
-
-
-ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      init_(NULL),
-      cond_(NULL),
-      next_(NULL),
-      may_have_function_literal_(true),
-      loop_variable_(NULL),
-      continue_id_(GetNextId(isolate)),
-      body_id_(GetNextId(isolate)) {
-}
-
-
-ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
-    : IterationStatement(isolate, labels),
-      each_(NULL),
-      enumerable_(NULL),
-      assignment_id_(GetNextId(isolate)) {
-}
-
-
-bool FunctionLiteral::strict_mode() const {
-  return scope()->is_strict_mode();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_AST_INL_H_
diff --git a/src/ast.cc b/src/ast.cc
index 418cc43..13e5589 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -48,16 +48,19 @@
 // ----------------------------------------------------------------------------
 // Implementation of other node functionality.
 
-Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
-  return (expression()->AsAssignment() != NULL &&
-          !expression()->AsAssignment()->is_compound())
-      ? expression()->AsAssignment()
-      : NULL;
+
+bool Expression::IsSmiLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
 }
 
 
-CountOperation* ExpressionStatement::StatementAsCountOperation() {
-  return expression()->AsCountOperation();
+bool Expression::IsStringLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
+}
+
+
+bool Expression::IsNullLiteral() {
+  return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
 }
 
 
@@ -66,7 +69,6 @@
       name_(var->name()),
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
-      inside_with_(false),
       is_trivial_(false),
       position_(RelocInfo::kNoPosition) {
   BindTo(var);
@@ -76,13 +78,11 @@
 VariableProxy::VariableProxy(Isolate* isolate,
                              Handle<String> name,
                              bool is_this,
-                             bool inside_with,
                              int position)
     : Expression(isolate),
       name_(name),
       var_(NULL),
       is_this_(is_this),
-      inside_with_(inside_with),
       is_trivial_(false),
       position_(position) {
   // Names must be canonicalized for fast equality checks.
@@ -157,6 +157,21 @@
 }
 
 
+int FunctionLiteral::start_position() const {
+  return scope()->start_position();
+}
+
+
+int FunctionLiteral::end_position() const {
+  return scope()->end_position();
+}
+
+
+LanguageMode FunctionLiteral::language_mode() const {
+  return scope()->language_mode();
+}
+
+
 ObjectLiteral::Property::Property(Literal* key, Expression* value) {
   emit_store_ = true;
   key_ = key;
@@ -327,59 +342,80 @@
 }
 
 
+static bool IsTypeof(Expression* expr) {
+  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+  return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
+}
+
+
+// Check for the pattern: typeof <expression> equals <string literal>.
+static bool MatchLiteralCompareTypeof(Expression* left,
+                                      Token::Value op,
+                                      Expression* right,
+                                      Expression** expr,
+                                      Handle<String>* check) {
+  if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
+    *expr = left->AsUnaryOperation()->expression();
+    *check = Handle<String>::cast(right->AsLiteral()->handle());
+    return true;
+  }
+  return false;
+}
+
+
 bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
                                               Handle<String>* check) {
-  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+  return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
+      MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
+}
 
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
-  Literal* left_literal = left_->AsLiteral();
-  Literal* right_literal = right_->AsLiteral();
 
-  // Check for the pattern: typeof <expression> == <string literal>.
-  if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
-      right_literal != NULL && right_literal->handle()->IsString()) {
-    *expr = left_unary->expression();
-    *check = Handle<String>::cast(right_literal->handle());
+static bool IsVoidOfLiteral(Expression* expr) {
+  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+  return maybe_unary != NULL &&
+      maybe_unary->op() == Token::VOID &&
+      maybe_unary->expression()->AsLiteral() != NULL;
+}
+
+
+// Check for the pattern: void <literal> equals <expression>.
+static bool MatchLiteralCompareUndefined(Expression* left,
+                                         Token::Value op,
+                                         Expression* right,
+                                         Expression** expr) {
+  if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
+    *expr = right;
     return true;
   }
-
-  // Check for the pattern: <string literal> == typeof <expression>.
-  if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
-      left_literal != NULL && left_literal->handle()->IsString()) {
-    *expr = right_unary->expression();
-    *check = Handle<String>::cast(left_literal->handle());
-    return true;
-  }
-
   return false;
 }
 
 
 bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
-  if (op_ != Token::EQ_STRICT) return false;
+  return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
+      MatchLiteralCompareUndefined(right_, op_, left_, expr);
+}
 
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
 
-  // Check for the pattern: <expression> === void <literal>.
-  if (right_unary != NULL && right_unary->op() == Token::VOID &&
-      right_unary->expression()->AsLiteral() != NULL) {
-    *expr = left_;
+// Check for the pattern: null equals <expression>.
+static bool MatchLiteralCompareNull(Expression* left,
+                                    Token::Value op,
+                                    Expression* right,
+                                    Expression** expr) {
+  if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
+    *expr = right;
     return true;
   }
-
-  // Check for the pattern: void <literal> === <expression>.
-  if (left_unary != NULL && left_unary->op() == Token::VOID &&
-      left_unary->expression()->AsLiteral() != NULL) {
-    *expr = right_;
-    return true;
-  }
-
   return false;
 }
 
 
+bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
+  return MatchLiteralCompareNull(left_, op_, right_, expr) ||
+      MatchLiteralCompareNull(right_, op_, left_, expr);
+}
+
+
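
The rewritten matchers factor the operand-order symmetry into helpers
and relax the undefined case from strict equality only to either
equality operator; comparisons against the null literal, previously a
dedicated CompareToNull AST node, are matched here as well. The source
patterns now recognized, as a sketch:

    var x;
    typeof x === "function";   // MatchLiteralCompareTypeof, either order
    "undefined" == typeof x;
    x === void 0;              // MatchLiteralCompareUndefined, == or ===
    void 0 == x;
    x == null;                 // MatchLiteralCompareNull replaces the old
    null == x;                 // CompareToNull node
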
 // ----------------------------------------------------------------------------
 // Inlining support
 
@@ -447,7 +483,7 @@
 
 
 bool ThisFunction::IsInlineable() const {
-  return false;
+  return true;
 }
 
 
@@ -529,7 +565,9 @@
 
 
 bool VariableProxy::IsInlineable() const {
-  return var()->IsUnallocated() || var()->IsStackAllocated();
+  return var()->IsUnallocated()
+      || var()->IsStackAllocated()
+      || var()->IsContextSlot();
 }
 
 
@@ -598,11 +636,6 @@
 }
 
 
-bool CompareToNull::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
 bool CountOperation::IsInlineable() const {
   return expression()->IsInlineable();
 }
@@ -677,6 +710,10 @@
   TypeInfo info = oracle->SwitchType(this);
   if (info.IsSmi()) {
     compare_type_ = SMI_ONLY;
+  } else if (info.IsSymbol()) {
+    compare_type_ = SYMBOL_ONLY;
+  } else if (info.IsNonSymbol()) {
+    compare_type_ = STRING_ONLY;
   } else if (info.IsNonPrimitive()) {
     compare_type_ = OBJECT_ONLY;
   } else {
@@ -705,7 +742,7 @@
     holder_ = Handle<JSObject>::null();
   }
   while (true) {
-    LookupResult lookup;
+    LookupResult lookup(type->GetIsolate());
     type->LookupInDescriptors(NULL, *name, &lookup);
     // If the function wasn't found directly in the map, we start
     // looking upwards through the prototype chain.
@@ -746,37 +783,41 @@
 
 void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
                               CallKind call_kind) {
-  Property* property = expression()->AsProperty();
-  ASSERT(property != NULL);
-  // Specialize for the receiver types seen at runtime.
-  Literal* key = property->key()->AsLiteral();
-  ASSERT(key != NULL && key->handle()->IsString());
-  Handle<String> name = Handle<String>::cast(key->handle());
-  receiver_types_.Clear();
-  oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    int length = receiver_types_.length();
-    for (int i = 0; i < length; i++) {
-      Handle<Map> map = receiver_types_.at(i);
-      ASSERT(!map.is_null() && *map != NULL);
-    }
-  }
-#endif
   is_monomorphic_ = oracle->CallIsMonomorphic(this);
-  check_type_ = oracle->GetCallCheckType(this);
-  if (is_monomorphic_) {
-    Handle<Map> map;
-    if (receiver_types_.length() > 0) {
-      ASSERT(check_type_ == RECEIVER_MAP_CHECK);
-      map = receiver_types_.at(0);
-    } else {
-      ASSERT(check_type_ != RECEIVER_MAP_CHECK);
-      holder_ = Handle<JSObject>(
-          oracle->GetPrototypeForPrimitiveCheck(check_type_));
-      map = Handle<Map>(holder_->map());
+  Property* property = expression()->AsProperty();
+  if (property == NULL) {
+    // Function call.  Specialize for monomorphic calls.
+    if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
+  } else {
+    // Method call.  Specialize for the receiver types seen at runtime.
+    Literal* key = property->key()->AsLiteral();
+    ASSERT(key != NULL && key->handle()->IsString());
+    Handle<String> name = Handle<String>::cast(key->handle());
+    receiver_types_.Clear();
+    oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+#ifdef DEBUG
+    if (FLAG_enable_slow_asserts) {
+      int length = receiver_types_.length();
+      for (int i = 0; i < length; i++) {
+        Handle<Map> map = receiver_types_.at(i);
+        ASSERT(!map.is_null() && *map != NULL);
+      }
     }
-    is_monomorphic_ = ComputeTarget(map, name);
+#endif
+    check_type_ = oracle->GetCallCheckType(this);
+    if (is_monomorphic_) {
+      Handle<Map> map;
+      if (receiver_types_.length() > 0) {
+        ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+        map = receiver_types_.at(0);
+      } else {
+        ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+        holder_ = Handle<JSObject>(
+            oracle->GetPrototypeForPrimitiveCheck(check_type_));
+        map = Handle<Map>(holder_->map());
+      }
+      is_monomorphic_ = ComputeTarget(map, name);
+    }
   }
 }
 
@@ -856,8 +897,6 @@
 FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
 #undef MAKE_TYPE_CASE
 
-RegExpEmpty RegExpEmpty::kInstance;
-
 
 static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
   Interval result = Interval::Empty();
diff --git a/src/ast.h b/src/ast.h
index b56205f..805526a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -90,7 +90,6 @@
   V(CountOperation)                             \
   V(BinaryOperation)                            \
   V(CompareOperation)                           \
-  V(CompareToNull)                              \
   V(ThisFunction)
 
 #define AST_NODE_LIST(V)                        \
@@ -119,7 +118,6 @@
 #define DECLARE_NODE_TYPE(type)                                         \
   virtual void Accept(AstVisitor* v);                                   \
   virtual AstNode::Type node_type() const { return AstNode::k##type; }  \
-  virtual type* As##type() { return this; }
 
 
 class AstNode: public ZoneObject {
@@ -154,7 +152,8 @@
 
   // Type testing & conversion functions overridden by concrete subclasses.
 #define DECLARE_NODE_FUNCTIONS(type)                  \
-  virtual type* As##type() { return NULL; }
+  bool Is##type() { return node_type() == AstNode::k##type; }          \
+  type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
   AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
 #undef DECLARE_NODE_FUNCTIONS
 
@@ -197,9 +196,6 @@
 
   virtual Statement* AsStatement()  { return this; }
 
-  virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
-  virtual CountOperation* StatementAsCountOperation() { return NULL; }
-
   bool IsEmpty() { return AsEmptyStatement() != NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -265,7 +261,6 @@
 
   virtual Expression* AsExpression()  { return this; }
 
-  virtual bool IsTrivial() { return false; }
   virtual bool IsValidLeftHandSide() { return false; }
 
   // Helpers for ToBoolean conversion.
@@ -277,27 +272,24 @@
   // names because [] for string objects is handled only by keyed ICs.
   virtual bool IsPropertyName() { return false; }
 
-  // Mark the expression as being compiled as an expression
-  // statement. This is used to transform postfix increments to
-  // (faster) prefix increments.
-  virtual void MarkAsStatement() { /* do nothing */ }
-
   // True iff the result can be safely overwritten (to avoid allocation).
   // False for operations that can return one of their operands.
   virtual bool ResultOverwriteAllowed() { return false; }
 
   // True iff the expression is a literal represented as a smi.
-  virtual bool IsSmiLiteral() { return false; }
+  bool IsSmiLiteral();
+
+  // True iff the expression is a string literal.
+  bool IsStringLiteral();
+
+  // True iff the expression is the null literal.
+  bool IsNullLiteral();
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
     return false;
   }
-  virtual bool IsArrayLength() {
-    UNREACHABLE();
-    return false;
-  }
   virtual SmallMapList* GetReceiverTypes() {
     UNREACHABLE();
     return NULL;
@@ -343,7 +335,14 @@
   int ExitId() const { return exit_id_; }
 
  protected:
-  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
+  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
+      : labels_(labels),
+        type_(type),
+        entry_id_(GetNextId(isolate)),
+        exit_id_(GetNextId(isolate)) {
+    ASSERT(labels == NULL || labels->length() > 0);
+  }
+
 
  private:
   ZoneStringList* labels_;
@@ -356,23 +355,19 @@
 
 class Block: public BreakableStatement {
  public:
-  inline Block(Isolate* isolate,
-               ZoneStringList* labels,
-               int capacity,
-               bool is_initializer_block);
+  Block(Isolate* isolate,
+        ZoneStringList* labels,
+        int capacity,
+        bool is_initializer_block)
+      : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+        statements_(capacity),
+        is_initializer_block_(is_initializer_block),
+        block_scope_(NULL) {
+  }
+
 
   DECLARE_NODE_TYPE(Block)
 
-  virtual Assignment* StatementAsSimpleAssignment() {
-    if (statements_.length() != 1) return NULL;
-    return statements_[0]->StatementAsSimpleAssignment();
-  }
-
-  virtual CountOperation* StatementAsCountOperation() {
-    if (statements_.length() != 1) return NULL;
-    return statements_[0]->StatementAsCountOperation();
-  }
-
   virtual bool IsInlineable() const;
 
   void AddStatement(Statement* statement) { statements_.Add(statement); }
@@ -393,31 +388,32 @@
 class Declaration: public AstNode {
  public:
   Declaration(VariableProxy* proxy,
-              Variable::Mode mode,
+              VariableMode mode,
               FunctionLiteral* fun,
               Scope* scope)
       : proxy_(proxy),
         mode_(mode),
         fun_(fun),
         scope_(scope) {
-    ASSERT(mode == Variable::VAR ||
-           mode == Variable::CONST ||
-           mode == Variable::LET);
+    ASSERT(mode == VAR ||
+           mode == CONST ||
+           mode == CONST_HARMONY ||
+           mode == LET);
     // At the moment there are no "const functions" in JavaScript...
-    ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
+    ASSERT(fun == NULL || mode == VAR || mode == LET);
   }
 
   DECLARE_NODE_TYPE(Declaration)
 
   VariableProxy* proxy() const { return proxy_; }
-  Variable::Mode mode() const { return mode_; }
+  VariableMode mode() const { return mode_; }
   FunctionLiteral* fun() const { return fun_; }  // may be NULL
   virtual bool IsInlineable() const;
   Scope* scope() const { return scope_; }
 
  private:
   VariableProxy* proxy_;
-  Variable::Mode mode_;
+  VariableMode mode_;
   FunctionLiteral* fun_;
 
   // Nested scope from which the declaration originated.
@@ -441,7 +437,11 @@
   Label* continue_target()  { return &continue_target_; }
 
  protected:
-  inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
+  IterationStatement(Isolate* isolate, ZoneStringList* labels)
+      : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+        body_(NULL),
+        osr_entry_id_(GetNextId(isolate)) {
+  }
 
   void Initialize(Statement* body) {
     body_ = body;
@@ -456,7 +456,13 @@
 
 class DoWhileStatement: public IterationStatement {
  public:
-  inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
+  DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        cond_(NULL),
+        condition_position_(-1),
+        continue_id_(GetNextId(isolate)),
+        back_edge_id_(GetNextId(isolate)) {
+  }
 
   DECLARE_NODE_TYPE(DoWhileStatement)
 
@@ -489,7 +495,12 @@
 
 class WhileStatement: public IterationStatement {
  public:
-  inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
+  WhileStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        cond_(NULL),
+        may_have_function_literal_(true),
+        body_id_(GetNextId(isolate)) {
+  }
 
   DECLARE_NODE_TYPE(WhileStatement)
 
@@ -522,7 +533,16 @@
 
 class ForStatement: public IterationStatement {
  public:
-  inline ForStatement(Isolate* isolate, ZoneStringList* labels);
+  ForStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        init_(NULL),
+        cond_(NULL),
+        next_(NULL),
+        may_have_function_literal_(true),
+        loop_variable_(NULL),
+        continue_id_(GetNextId(isolate)),
+        body_id_(GetNextId(isolate)) {
+  }
 
   DECLARE_NODE_TYPE(ForStatement)
 
@@ -571,7 +591,12 @@
 
 class ForInStatement: public IterationStatement {
  public:
-  inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
+  ForInStatement(Isolate* isolate, ZoneStringList* labels)
+      : IterationStatement(isolate, labels),
+        each_(NULL),
+        enumerable_(NULL),
+        assignment_id_(GetNextId(isolate)) {
+  }
 
   DECLARE_NODE_TYPE(ForInStatement)
 
@@ -606,9 +631,6 @@
 
   virtual bool IsInlineable() const;
 
-  virtual Assignment* StatementAsSimpleAssignment();
-  virtual CountOperation* StatementAsCountOperation();
-
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
 
@@ -704,6 +726,8 @@
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
+  bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
   bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
 
  private:
@@ -711,7 +735,13 @@
   Label body_target_;
   ZoneList<Statement*>* statements_;
   int position_;
-  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  enum CompareTypeFeedback {
+    NONE,
+    SMI_ONLY,
+    SYMBOL_ONLY,
+    STRING_ONLY,
+    OBJECT_ONLY
+  };
   CompareTypeFeedback compare_type_;
   int compare_id_;
   int entry_id_;
@@ -720,7 +750,12 @@
 
 class SwitchStatement: public BreakableStatement {
  public:
-  inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
+  SwitchStatement(Isolate* isolate, ZoneStringList* labels)
+      : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+        tag_(NULL),
+        cases_(NULL) {
+  }
+
 
   DECLARE_NODE_TYPE(SwitchStatement)
 
@@ -808,18 +843,25 @@
 
 class TryStatement: public Statement {
  public:
-  explicit TryStatement(Block* try_block)
-      : try_block_(try_block), escaping_targets_(NULL) { }
+  explicit TryStatement(int index, Block* try_block)
+      : index_(index),
+        try_block_(try_block),
+        escaping_targets_(NULL) {
+  }
 
   void set_escaping_targets(ZoneList<Label*>* targets) {
     escaping_targets_ = targets;
   }
 
+  int index() const { return index_; }
   Block* try_block() const { return try_block_; }
   ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
   virtual bool IsInlineable() const;
 
  private:
+  // Unique (per-function) index of this handler.  This is not an AST ID.
+  int index_;
+
   Block* try_block_;
   ZoneList<Label*>* escaping_targets_;
 };
@@ -827,11 +869,12 @@
 
 class TryCatchStatement: public TryStatement {
  public:
-  TryCatchStatement(Block* try_block,
+  TryCatchStatement(int index,
+                    Block* try_block,
                     Scope* scope,
                     Variable* variable,
                     Block* catch_block)
-      : TryStatement(try_block),
+      : TryStatement(index, try_block),
         scope_(scope),
         variable_(variable),
         catch_block_(catch_block) {
@@ -853,8 +896,8 @@
 
 class TryFinallyStatement: public TryStatement {
  public:
-  TryFinallyStatement(Block* try_block, Block* finally_block)
-      : TryStatement(try_block),
+  TryFinallyStatement(int index, Block* try_block, Block* finally_block)
+      : TryStatement(index, try_block),
         finally_block_(finally_block) { }
 
   DECLARE_NODE_TYPE(TryFinallyStatement)
@@ -889,9 +932,6 @@
 
   DECLARE_NODE_TYPE(Literal)
 
-  virtual bool IsTrivial() { return true; }
-  virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
-
   // Check if this literal is identical to the other literal.
   bool IsIdenticalTo(const Literal* other) const {
     return handle_.is_identical_to(other->handle_);
@@ -1100,18 +1140,17 @@
  public:
   VariableProxy(Isolate* isolate, Variable* var);
 
+  VariableProxy(Isolate* isolate,
+                Handle<String> name,
+                bool is_this,
+                int position = RelocInfo::kNoPosition);
+
   DECLARE_NODE_TYPE(VariableProxy)
 
   virtual bool IsValidLeftHandSide() {
     return var_ == NULL ? true : var_->IsValidLeftHandSide();
   }
 
-  virtual bool IsTrivial() {
-    // Reading from a mutable variable is a side effect, but the
-    // variable for 'this' is immutable.
-    return is_this_ || is_trivial_;
-  }
-
   virtual bool IsInlineable() const;
 
   bool IsVariable(Handle<String> n) {
@@ -1123,7 +1162,6 @@
   Handle<String> name() const { return name_; }
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
-  bool inside_with() const { return inside_with_; }
   int position() const { return position_; }
 
   void MarkAsTrivial() { is_trivial_ = true; }
@@ -1135,17 +1173,8 @@
   Handle<String> name_;
   Variable* var_;  // resolved variable, or NULL
   bool is_this_;
-  bool inside_with_;
   bool is_trivial_;
   int position_;
-
-  VariableProxy(Isolate* isolate,
-                Handle<String> name,
-                bool is_this,
-                bool inside_with,
-                int position = RelocInfo::kNoPosition);
-
-  friend class Scope;
 };
 
 
@@ -1182,7 +1211,7 @@
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-  virtual bool IsArrayLength() { return is_array_length_; }
+  bool IsArrayLength() { return is_array_length_; }
 
  private:
   Expression* obj_;
@@ -1320,8 +1349,17 @@
                  Token::Value op,
                  Expression* expression,
                  int pos)
-      : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
+      : Expression(isolate),
+        op_(op),
+        expression_(expression),
+        pos_(pos),
+        materialize_true_id_(AstNode::kNoNumber),
+        materialize_false_id_(AstNode::kNoNumber) {
     ASSERT(Token::IsUnaryOp(op));
+    if (op == Token::NOT) {
+      materialize_true_id_ = GetNextId(isolate);
+      materialize_false_id_ = GetNextId(isolate);
+    }
   }
 
   DECLARE_NODE_TYPE(UnaryOperation)
@@ -1334,10 +1372,18 @@
   Expression* expression() const { return expression_; }
   virtual int position() const { return pos_; }
 
+  int MaterializeTrueId() { return materialize_true_id_; }
+  int MaterializeFalseId() { return materialize_false_id_; }
+
  private:
   Token::Value op_;
   Expression* expression_;
   int pos_;
+
+  // For unary not (Token::NOT), the AST ids where true and false will
+  // actually be materialized, respectively.
+  int materialize_true_id_;
+  int materialize_false_id_;
 };
 
 
@@ -1465,6 +1511,7 @@
   // Match special cases.
   bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
   bool IsLiteralCompareUndefined(Expression** expr);
+  bool IsLiteralCompareNull(Expression** expr);
 
  private:
   Token::Value op_;
@@ -1477,25 +1524,6 @@
 };
 
 
-class CompareToNull: public Expression {
- public:
-  CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
-      : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
-
-  DECLARE_NODE_TYPE(CompareToNull)
-
-  virtual bool IsInlineable() const;
-
-  bool is_strict() const { return is_strict_; }
-  Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
-  Expression* expression() const { return expression_; }
-
- private:
-  bool is_strict_;
-  Expression* expression_;
-};
-
-
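
With the dedicated CompareToNull node deleted, null checks are now recognized by pattern-matching plain CompareOperation nodes through the new IsLiteralCompareNull, alongside the existing IsLiteralCompareTypeof and IsLiteralCompareUndefined matchers. A rough sketch of what such a matcher looks like, using simplified stand-in classes rather than the real AST:

    #include <cstddef>

    struct Expr {
      virtual ~Expr() {}
      virtual bool IsNullLiteral() { return false; }
    };

    struct NullLiteral : Expr {
      virtual bool IsNullLiteral() { return true; }
    };

    enum Op { EQ, EQ_STRICT, OTHER };

    // Matches "<expr> == null" / "<expr> === null" on a generic
    // comparison node; on success, *sub names the non-null side.
    struct Compare : Expr {
      Op op;
      Expr* left;
      Expr* right;

      bool IsLiteralCompareNull(Expr** sub) {
        if (op != EQ && op != EQ_STRICT) return false;
        if (right->IsNullLiteral()) { *sub = left;  return true; }
        if (left->IsNullLiteral())  { *sub = right; return true; }
        return false;
      }
    };

    int main() {
      NullLiteral null_lit;
      Expr some_expr;
      Compare cmp;
      cmp.op = EQ;
      cmp.left = &some_expr;
      cmp.right = &null_lit;
      Expr* sub = NULL;
      return (cmp.IsLiteralCompareNull(&sub) && sub == &some_expr) ? 0 : 1;
    }
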
 class Conditional: public Expression {
  public:
   Conditional(Isolate* isolate,
@@ -1630,31 +1658,30 @@
                   ZoneList<Statement*>* body,
                   int materialized_literal_count,
                   int expected_property_count,
+                  int handler_count,
                   bool has_only_simple_this_property_assignments,
                   Handle<FixedArray> this_property_assignments,
-                  int num_parameters,
-                  int start_position,
-                  int end_position,
+                  int parameter_count,
                   Type type,
                   bool has_duplicate_parameters)
       : Expression(isolate),
         name_(name),
         scope_(scope),
         body_(body),
+        this_property_assignments_(this_property_assignments),
+        inferred_name_(isolate->factory()->empty_string()),
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
-        has_only_simple_this_property_assignments_(
-            has_only_simple_this_property_assignments),
-        this_property_assignments_(this_property_assignments),
-        num_parameters_(num_parameters),
-        start_position_(start_position),
-        end_position_(end_position),
-        function_token_position_(RelocInfo::kNoPosition),
-        inferred_name_(HEAP->empty_string()),
-        is_expression_(type != DECLARATION),
-        is_anonymous_(type == ANONYMOUS_EXPRESSION),
-        pretenure_(false),
-        has_duplicate_parameters_(has_duplicate_parameters) {
+        handler_count_(handler_count),
+        parameter_count_(parameter_count),
+        function_token_position_(RelocInfo::kNoPosition) {
+    bitfield_ =
+        HasOnlySimpleThisPropertyAssignments::encode(
+            has_only_simple_this_property_assignments) |
+        IsExpression::encode(type != DECLARATION) |
+        IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
+        Pretenure::encode(false) |
+        HasDuplicateParameters::encode(has_duplicate_parameters);
   }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1664,21 +1691,23 @@
   ZoneList<Statement*>* body() const { return body_; }
   void set_function_token_position(int pos) { function_token_position_ = pos; }
   int function_token_position() const { return function_token_position_; }
-  int start_position() const { return start_position_; }
-  int end_position() const { return end_position_; }
-  bool is_expression() const { return is_expression_; }
-  bool is_anonymous() const { return is_anonymous_; }
-  bool strict_mode() const;
+  int start_position() const;
+  int end_position() const;
+  bool is_expression() const { return IsExpression::decode(bitfield_); }
+  bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
+  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
+  LanguageMode language_mode() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
+  int handler_count() { return handler_count_; }
   bool has_only_simple_this_property_assignments() {
-      return has_only_simple_this_property_assignments_;
+    return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
   }
   Handle<FixedArray> this_property_assignments() {
       return this_property_assignments_;
   }
-  int num_parameters() { return num_parameters_; }
+  int parameter_count() { return parameter_count_; }
 
   bool AllowsLazyCompilation();
 
@@ -1692,29 +1721,33 @@
     inferred_name_ = inferred_name;
   }
 
-  bool pretenure() { return pretenure_; }
-  void set_pretenure(bool value) { pretenure_ = value; }
+  bool pretenure() { return Pretenure::decode(bitfield_); }
+  void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
   virtual bool IsInlineable() const;
 
-  bool has_duplicate_parameters() { return has_duplicate_parameters_; }
+  bool has_duplicate_parameters() {
+    return HasDuplicateParameters::decode(bitfield_);
+  }
 
  private:
   Handle<String> name_;
   Scope* scope_;
   ZoneList<Statement*>* body_;
+  Handle<FixedArray> this_property_assignments_;
+  Handle<String> inferred_name_;
+
   int materialized_literal_count_;
   int expected_property_count_;
-  bool has_only_simple_this_property_assignments_;
-  Handle<FixedArray> this_property_assignments_;
-  int num_parameters_;
-  int start_position_;
-  int end_position_;
+  int handler_count_;
+  int parameter_count_;
   int function_token_position_;
-  Handle<String> inferred_name_;
-  bool is_expression_;
-  bool is_anonymous_;
-  bool pretenure_;
-  bool has_duplicate_parameters_;
+
+  unsigned bitfield_;
+  class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
+  class IsExpression: public BitField<bool, 1, 1> {};
+  class IsAnonymous: public BitField<bool, 2, 1> {};
+  class Pretenure: public BitField<bool, 3, 1> {};
+  class HasDuplicateParameters: public BitField<bool, 4, 1> {};
 };
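
FunctionLiteral's five booleans now share one unsigned word, encoded and decoded through BitField<T, shift, size>, V8's bit-packing helper. A minimal re-implementation of the idea, under the assumption that the real helper behaves like this sketch:

    #include <cassert>

    // Each field occupies `size` bits starting at bit `shift` of one
    // unsigned word; encode() shifts a value in, decode() masks it out.
    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const unsigned kMask = ((1U << size) - 1) << shift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    typedef BitFieldSketch<bool, 0, 1> IsExpression;
    typedef BitFieldSketch<bool, 1, 1> IsAnonymous;

    int main() {
      unsigned bitfield = IsExpression::encode(true) |
                          IsAnonymous::encode(false);
      assert(IsExpression::decode(bitfield));
      assert(!IsAnonymous::decode(bitfield));
      return 0;
    }
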
 
 
@@ -2096,9 +2129,10 @@
   virtual bool IsEmpty();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
-  static RegExpEmpty* GetInstance() { return &kInstance; }
- private:
-  static RegExpEmpty kInstance;
+  static RegExpEmpty* GetInstance() {
+    static RegExpEmpty* instance = ::new RegExpEmpty();
+    return instance;
+  }
 };
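
Handing out a heap-allocated, intentionally leaked instance from a function-local static is the standard cure for the exit-time destructor that the old static kInstance member carried (compare the ChangeLog entry "Removed exit-time destructors"): nothing ever destroys the object, so shutdown ordering can never touch a dead singleton. The idiom in isolation:

    // Construct on first use, never destroy: no atexit-registered
    // destructor, so shutdown ordering cannot reach a dead object.
    // (Before C++11, the first call must not race with another thread.)
    class Registry {
     public:
      static Registry* GetInstance() {
        static Registry* instance = ::new Registry();  // leaked on purpose
        return instance;
      }
     private:
      Registry() {}
      ~Registry() {}  // never runs; kept private so nobody can delete
    };

    int main() {
      return Registry::GetInstance() != 0 ? 0 : 1;
    }
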
 
 
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
index 5113de2..9498fd7 100644
--- a/src/atomicops_internals_mips_gcc.h
+++ b/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 namespace v8 {
 namespace internal {
@@ -48,16 +48,19 @@
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("1:\n"
-                       "ll %0, %1\n"  // prev = *ptr
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
                        "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "nop\n"  // delay slot nop
-                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                        "beqz %2, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
-                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                        : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                        : "memory");
   return prev;
@@ -68,12 +71,15 @@
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %1, %2\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");
@@ -87,13 +93,15 @@
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %0, %2\n"  // temp = *ptr
-                       "addu %0, %3\n"  // temp = temp + increment
-                       "move %1, %0\n"  // temp2 = temp
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
@@ -103,6 +111,7 @@
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
+  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   ATOMICOPS_COMPILER_BARRIER();
   return res;
@@ -117,16 +126,19 @@
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   ATOMICOPS_COMPILER_BARRIER();
-  return x;
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   ATOMICOPS_COMPILER_BARRIER();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@
 }
 
 inline void MemoryBarrier() {
-  ATOMICOPS_COMPILER_BARRIER();
+  __asm__ __volatile__("sync" : : : "memory");
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
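
Two separate fixes land in this file: the ll/sc loops get a scratch register so the store-conditional no longer clobbers new_value on retry, and the barriers are untangled so that ATOMICOPS_COMPILER_BARRIER is a pure compiler fence while MemoryBarrier emits a real MIPS sync; the acquire/release wrappers now fence on both sides of the primitive. A portability illustration of those intended semantics, built on a GCC builtin (which is itself a full hardware barrier), not on the MIPS assembly path:

    #include <cassert>

    typedef int Atomic32;

    // Pure compiler fence: blocks compiler reordering, emits no
    // instruction (the revised ATOMICOPS_COMPILER_BARRIER above).
    #define COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

    // Reference semantics for NoBarrier_CompareAndSwap: if *ptr equals
    // old_value, store new_value; either way return the previous *ptr.
    inline Atomic32 CompareAndSwap(volatile Atomic32* ptr,
                                   Atomic32 old_value,
                                   Atomic32 new_value) {
      return __sync_val_compare_and_swap(ptr, old_value, new_value);
    }

    // Fences on both sides of the primitive, mirroring the revised
    // Acquire_/Release_CompareAndSwap above.
    inline Atomic32 Acquire_CAS(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value) {
      COMPILER_BARRIER();
      Atomic32 prev = CompareAndSwap(ptr, old_value, new_value);
      COMPILER_BARRIER();
      return prev;
    }

    int main() {
      volatile Atomic32 cell = 1;
      assert(Acquire_CAS(&cell, 1, 2) == 1 && cell == 2);
      return 0;
    }
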
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index f07e625..29c16ee 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -34,9 +34,11 @@
 #include "debug.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "macro-assembler.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "platform.h"
 #include "snapshot.h"
 #include "extensions/externalize-string-extension.h"
 #include "extensions/gc-extension.h"
@@ -209,12 +211,31 @@
   void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
+
+  enum ExtensionTraversalState {
+    UNVISITED, VISITED, INSTALLED
+  };
+
+  class ExtensionStates {
+  public:
+    ExtensionStates();
+    ExtensionTraversalState get_state(RegisteredExtension* extension);
+    void set_state(RegisteredExtension* extension,
+                   ExtensionTraversalState state);
+  private:
+    Allocator allocator_;
+    HashMap map_;
+    DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
+  };
+
   // Used both for deserialized and from-scratch contexts to add the extensions
   // provided.
   static bool InstallExtensions(Handle<Context> global_context,
                                 v8::ExtensionConfiguration* extensions);
-  static bool InstallExtension(const char* name);
-  static bool InstallExtension(v8::RegisteredExtension* current);
+  static bool InstallExtension(const char* name,
+                               ExtensionStates* extension_states);
+  static bool InstallExtension(v8::RegisteredExtension* current,
+                               ExtensionStates* extension_states);
   static void InstallSpecialObjects(Handle<Context> global_context);
   bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
@@ -361,6 +382,7 @@
   if (is_ecma_native) {
     function->shared()->set_instance_class_name(*symbol);
   }
+  function->shared()->set_native(true);
   return function;
 }
 
@@ -374,26 +396,28 @@
   PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
+  DescriptorArray::WhitenessWitness witness(*descriptors);
+
   {  // Add length.
     Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
     CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
-    descriptors->Set(0, &d);
+    descriptors->Set(0, &d, witness);
   }
   {  // Add name.
     Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
     CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
-    descriptors->Set(1, &d);
+    descriptors->Set(1, &d, witness);
   }
   {  // Add arguments.
     Handle<Foreign> foreign =
         factory()->NewForeign(&Accessors::FunctionArguments);
     CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
-    descriptors->Set(2, &d);
+    descriptors->Set(2, &d, witness);
   }
   {  // Add caller.
     Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
     CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
-    descriptors->Set(3, &d);
+    descriptors->Set(3, &d, witness);
   }
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
     // Add prototype.
@@ -403,9 +427,9 @@
     Handle<Foreign> foreign =
         factory()->NewForeign(&Accessors::FunctionPrototype);
     CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
-    descriptors->Set(4, &d);
+    descriptors->Set(4, &d, witness);
   }
-  descriptors->Sort();
+  descriptors->Sort(witness);
   return descriptors;
 }
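
Every descriptors->Set and Sort call now threads a DescriptorArray::WhitenessWitness through. Judging by the name and the incremental-marking work in this merge, the witness is a capability token: constructing it asserts once that the array is still white (unseen by the marker), which is what makes the subsequent barrier-free raw writes legal. A stripped-down sketch of the token idea, with hypothetical names:

    #include <cassert>

    class Array;
    // Stand-in for the real marking check; assumed, not V8's API.
    inline bool IsUnmarkedByGC(Array*) { return true; }

    // Capability token: obtaining one proves the precondition held,
    // so APIs can demand the token instead of re-checking per write.
    class BarrierFreeWrites {
     public:
      explicit BarrierFreeWrites(Array* array) {
        assert(IsUnmarkedByGC(array));
      }
    };

    class Array {
     public:
      // Raw store with no write barrier; the token parameter is the
      // caller's proof that skipping the barrier is safe.
      void Set(int index, int value, const BarrierFreeWrites&) {
        data_[index] = value;
      }
     private:
      int data_[16];
    };

    int main() {
      Array a;
      BarrierFreeWrites witness(&a);
      a.Set(0, 42, witness);
      return 0;
    }
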
 
@@ -478,7 +502,7 @@
   // 262 15.3.4.
   Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
   Handle<JSFunction> empty_function =
-      factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
+      factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
 
   // --- E m p t y ---
   Handle<Code> code =
@@ -521,41 +545,43 @@
                                     ? 4
                                     : 5);
   PropertyAttributes attributes = static_cast<PropertyAttributes>(
-      DONT_ENUM | DONT_DELETE | READ_ONLY);
+      DONT_ENUM | DONT_DELETE);
+
+  DescriptorArray::WhitenessWitness witness(*descriptors);
 
   {  // length
     Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
     CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
-    descriptors->Set(0, &d);
+    descriptors->Set(0, &d, witness);
   }
   {  // name
     Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
     CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
-    descriptors->Set(1, &d);
+    descriptors->Set(1, &d, witness);
   }
   {  // arguments
     CallbacksDescriptor d(*factory()->arguments_symbol(),
                           *arguments,
                           attributes);
-    descriptors->Set(2, &d);
+    descriptors->Set(2, &d, witness);
   }
   {  // caller
     CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
-    descriptors->Set(3, &d);
+    descriptors->Set(3, &d, witness);
   }
 
   // prototype
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
-    if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
-      attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+    if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
+      attributes = static_cast<PropertyAttributes>(attributes | READ_ONLY);
     }
     Handle<Foreign> foreign =
         factory()->NewForeign(&Accessors::FunctionPrototype);
     CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
-    descriptors->Set(4, &d);
+    descriptors->Set(4, &d, witness);
   }
 
-  descriptors->Sort();
+  descriptors->Sort(witness);
   return descriptors;
 }
 
@@ -565,7 +591,7 @@
   if (throw_type_error_function.is_null()) {
     Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
     throw_type_error_function =
-      factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
+      factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
     Handle<Code> code(isolate()->builtins()->builtin(
         Builtins::kStrictModePoisonPill));
     throw_type_error_function->set_map(
@@ -940,6 +966,7 @@
     ASSERT_EQ(0, initial_map->inobject_properties());
 
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
+    DescriptorArray::WhitenessWitness witness(*descriptors);
     PropertyAttributes final =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
     int enum_index = 0;
@@ -949,7 +976,7 @@
                             JSRegExp::kSourceFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(0, &field);
+      descriptors->Set(0, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.2.
@@ -957,7 +984,7 @@
                             JSRegExp::kGlobalFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(1, &field);
+      descriptors->Set(1, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.3.
@@ -965,7 +992,7 @@
                             JSRegExp::kIgnoreCaseFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(2, &field);
+      descriptors->Set(2, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.4.
@@ -973,7 +1000,7 @@
                             JSRegExp::kMultilineFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(3, &field);
+      descriptors->Set(3, &field, witness);
     }
     {
       // ECMA-262, section 15.10.7.5.
@@ -983,10 +1010,10 @@
                             JSRegExp::kLastIndexFieldIndex,
                             writable,
                             enum_index++);
-      descriptors->Set(4, &field);
+      descriptors->Set(4, &field, witness);
     }
     descriptors->SetNextEnumerationIndex(enum_index);
-    descriptors->Sort();
+    descriptors->Sort(witness);
 
     initial_map->set_inobject_properties(5);
     initial_map->set_pre_allocated_property_fields(5);
@@ -995,6 +1022,26 @@
         initial_map->instance_size() + 5 * kPointerSize);
     initial_map->set_instance_descriptors(*descriptors);
     initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
+
+    // The RegExp prototype object is itself a RegExp.
+    Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
+    proto_map->set_prototype(global_context()->initial_object_prototype());
+    Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
+    proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
+                                 heap->empty_string());
+    proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+                                 Smi::FromInt(0),
+                                 SKIP_WRITE_BARRIER);  // It's a Smi.
+    initial_map->set_prototype(*proto);
+    factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
+                                   JSRegExp::IRREGEXP, factory->empty_string(),
+                                   JSRegExp::Flags(0), 0);
   }
 
   {  // -- J S O N
@@ -1044,7 +1091,7 @@
                             DONT_ENUM);
 
 #ifdef DEBUG
-    LookupResult lookup;
+    LookupResult lookup(isolate);
     result->LocalLookup(heap->callee_symbol(), &lookup);
     ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
@@ -1063,11 +1110,6 @@
   }
 
   {  // --- aliased_arguments_boilerplate_
-    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
-    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
-    new_map->set_pre_allocated_property_fields(2);
-    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
-    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
     // Set up a well-formed parameter map to make assertions happy.
     Handle<FixedArray> elements = factory->NewFixedArray(2);
     elements->set_map(heap->non_strict_arguments_elements_map());
@@ -1076,7 +1118,16 @@
     elements->set(0, *array);
     array = factory->NewFixedArray(0);
     elements->set(1, *array);
+
+    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+    new_map->set_pre_allocated_property_fields(2);
+    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+    // Set elements kind after allocating the object because
+    // NewJSObjectFromMap assumes a fast elements map.
+    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
     result->set_elements(*elements);
+    ASSERT(result->HasNonStrictArgumentsElements());
     global_context()->set_aliased_arguments_boilerplate(*result);
   }
 
@@ -1099,19 +1150,20 @@
 
     // Create the descriptor array for the arguments object.
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
+    DescriptorArray::WhitenessWitness witness(*descriptors);
     {  // length
       FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
-      descriptors->Set(0, &d);
+      descriptors->Set(0, &d, witness);
     }
     {  // callee
       CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
-      descriptors->Set(1, &d);
+      descriptors->Set(1, &d, witness);
     }
     {  // caller
       CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
-      descriptors->Set(2, &d);
+      descriptors->Set(2, &d, witness);
     }
-    descriptors->Sort();
+    descriptors->Sort(witness);
 
     // Create the map. Allocate one in-object field for length.
     Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
@@ -1136,7 +1188,7 @@
                             DONT_ENUM);
 
 #ifdef DEBUG
-    LookupResult lookup;
+    LookupResult lookup(isolate);
     result->LocalLookup(heap->length_symbol(), &lookup);
     ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
@@ -1195,6 +1247,14 @@
 
   // Initialize the data slot.
   global_context()->set_data(heap->undefined_value());
+
+  {
+    // Initialize the random seed slot.
+    Handle<ByteArray> zeroed_byte_array(
+        factory->NewByteArray(kRandomStateSize));
+    global_context()->set_random_seed(*zeroed_byte_array);
+    memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
+  }
 }
 
 
@@ -1202,12 +1262,26 @@
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
   // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
-  // longer need to live behind a flag, so WeakMap gets added to the snapshot.
-  if (FLAG_harmony_weakmaps) {  // -- W e a k M a p
-    Handle<JSObject> prototype =
-        factory()->NewJSObject(isolate()->object_function(), TENURED);
-    InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
-                    prototype, Builtins::kIllegal, true);
+  // longer need to live behind a flag, so functions get added to the snapshot.
+  if (FLAG_harmony_collections) {
+    {  // -- S e t
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+                      prototype, Builtins::kIllegal, true);
+    }
+    {  // -- M a p
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+                      prototype, Builtins::kIllegal, true);
+    }
+    {  // -- W e a k M a p
+      Handle<JSObject> prototype =
+          factory()->NewJSObject(isolate()->object_function(), TENURED);
+      InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+                      prototype, Builtins::kIllegal, true);
+    }
   }
 }
 
@@ -1327,6 +1401,8 @@
                  configure_instance_fun);
   INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
   INSTALL_NATIVE(JSObject, "functionCache", function_cache);
+  INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
+                 to_complete_property_descriptor);
 }
 
 void Genesis::InstallExperimentalNativeFunctions() {
@@ -1334,6 +1410,7 @@
     INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
     INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
     INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
+    INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
   }
 }
 
@@ -1555,6 +1632,18 @@
         isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
+    // InternalArrays should not use Smi-Only array optimizations. There are too
+    // many places in the C++ runtime code (e.g. RegEx) that assume that
+    // elements in InternalArrays can be set to non-Smi values without going
+    // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
+    // transition easy to trap. Moreover, they rarely are smi-only.
+    MaybeObject* maybe_map =
+        array_function->initial_map()->CopyDropTransitions();
+    Map* new_map;
+    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+    new_map->set_elements_kind(FAST_ELEMENTS);
+    array_function->set_initial_map(new_map);
+
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
         factory()->CopyAppendForeignDescriptor(
@@ -1656,7 +1745,9 @@
     Handle<DescriptorArray> reresult_descriptors =
         factory()->NewDescriptorArray(3);
 
-    reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
+    DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
+
+    reresult_descriptors->CopyFrom(0, *array_descriptors, 0, witness);
 
     int enum_index = 0;
     {
@@ -1664,7 +1755,7 @@
                                   JSRegExpResult::kIndexIndex,
                                   NONE,
                                   enum_index++);
-      reresult_descriptors->Set(1, &index_field);
+      reresult_descriptors->Set(1, &index_field, witness);
     }
 
     {
@@ -1672,9 +1763,9 @@
                                   JSRegExpResult::kInputIndex,
                                   NONE,
                                   enum_index++);
-      reresult_descriptors->Set(2, &input_field);
+      reresult_descriptors->Set(2, &input_field, witness);
     }
-    reresult_descriptors->Sort();
+    reresult_descriptors->Sort(witness);
 
     initial_map->set_inobject_properties(2);
     initial_map->set_pre_allocated_property_fields(2);
@@ -1701,9 +1792,9 @@
                "native proxy.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
-    if (FLAG_harmony_weakmaps &&
+    if (FLAG_harmony_collections &&
         strcmp(ExperimentalNatives::GetScriptName(i).start(),
-               "native weakmap.js") == 0) {
+               "native collection.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
   }
@@ -1863,6 +1954,34 @@
 #endif
 }
 
+static uint32_t Hash(RegisteredExtension* extension) {
+  return v8::internal::ComputePointerHash(extension);
+}
+
+static bool MatchRegisteredExtensions(void* key1, void* key2) {
+  return key1 == key2;
+}
+
+Genesis::ExtensionStates::ExtensionStates()
+  : allocator_(),
+    map_(MatchRegisteredExtensions, &allocator_, 8)
+  {}
+
+Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
+    RegisteredExtension* extension) {
+  i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension), false);
+  if (entry == NULL) {
+    return UNVISITED;
+  }
+  return static_cast<ExtensionTraversalState>(
+      reinterpret_cast<intptr_t>(entry->value));
+}
+
+void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
+                                         ExtensionTraversalState state) {
+  map_.Lookup(extension, Hash(extension), true)->value =
+      reinterpret_cast<void*>(static_cast<intptr_t>(state));
+}
 
 bool Genesis::InstallExtensions(Handle<Context> global_context,
                                 v8::ExtensionConfiguration* extensions) {
@@ -1870,29 +1989,27 @@
   //                 effort. (The external API reads 'ignore'-- does that mean
   //                 we can break the interface?)
 
-  // Clear coloring of extension list
+
+  ExtensionStates extension_states;  // All extensions have state UNVISITED.
+  // Install auto extensions.
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
-    current->set_state(v8::UNVISITED);
-    current = current->next();
-  }
-  // Install auto extensions.
-  current = v8::RegisteredExtension::first_extension();
-  while (current != NULL) {
     if (current->extension()->auto_enable())
-      InstallExtension(current);
+      InstallExtension(current, &extension_states);
     current = current->next();
   }
 
-  if (FLAG_expose_gc) InstallExtension("v8/gc");
-  if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
+  if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states);
+  if (FLAG_expose_externalize_string) {
+    InstallExtension("v8/externalize", &extension_states);
+  }
 
   if (extensions == NULL) return true;
   // Install required extensions
   int count = v8::ImplementationUtilities::GetNameCount(extensions);
   const char** names = v8::ImplementationUtilities::GetNames(extensions);
   for (int i = 0; i < count; i++) {
-    if (!InstallExtension(names[i]))
+    if (!InstallExtension(names[i], &extension_states))
       return false;
   }
 
@@ -1902,7 +2019,8 @@
 
 // Installs a named extension.  This method is unoptimized and does
 // not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name) {
+bool Genesis::InstallExtension(const char* name,
+                               ExtensionStates* extension_states) {
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   // Loop until we find the relevant extension
   while (current != NULL) {
@@ -1915,42 +2033,52 @@
         "v8::Context::New()", "Cannot find required extension");
     return false;
   }
-  return InstallExtension(current);
+  return InstallExtension(current, extension_states);
 }
 
 
-bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
+bool Genesis::InstallExtension(v8::RegisteredExtension* current,
+                               ExtensionStates* extension_states) {
   HandleScope scope;
 
-  if (current->state() == v8::INSTALLED) return true;
+  if (extension_states->get_state(current) == INSTALLED) return true;
   // The current node has already been visited so there must be a
   // cycle in the dependency graph; fail.
-  if (current->state() == v8::VISITED) {
+  if (extension_states->get_state(current) == VISITED) {
     v8::Utils::ReportApiFailure(
         "v8::Context::New()", "Circular extension dependency");
     return false;
   }
-  ASSERT(current->state() == v8::UNVISITED);
-  current->set_state(v8::VISITED);
+  ASSERT(extension_states->get_state(current) == UNVISITED);
+  extension_states->set_state(current, VISITED);
   v8::Extension* extension = current->extension();
   // Install the extension's dependencies
   for (int i = 0; i < extension->dependency_count(); i++) {
-    if (!InstallExtension(extension->dependencies()[i])) return false;
+    if (!InstallExtension(extension->dependencies()[i], extension_states))
+      return false;
   }
   Isolate* isolate = Isolate::Current();
-  Vector<const char> source = CStrVector(extension->source());
-  Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
-  bool result = CompileScriptCached(CStrVector(extension->name()),
-                                    source_code,
-                                    isolate->bootstrapper()->extensions_cache(),
-                                    extension,
-                                    Handle<Context>(isolate->context()),
-                                    false);
+  Handle<String> source_code =
+      isolate->factory()->NewExternalStringFromAscii(extension->source());
+  bool result = CompileScriptCached(
+      CStrVector(extension->name()),
+      source_code,
+      isolate->bootstrapper()->extensions_cache(),
+      extension,
+      Handle<Context>(isolate->context()),
+      false);
   ASSERT(isolate->has_pending_exception() != result);
   if (!result) {
+    // Print the name of the extension that failed to install. When an
+    // error is thrown during bootstrapping, the isolate's error-throwing
+    // machinery already prints the line number at which it happened to
+    // the console.
+    OS::PrintError("Error installing extension '%s'.\n",
+                   current->extension()->name());
     isolate->clear_pending_exception();
   }
-  current->set_state(v8::INSTALLED);
+  extension_states->set_state(current, INSTALLED);
+  isolate->NotifyExtensionInstalled();
   return result;
 }
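
Traversal state used to be stored on the RegisteredExtension itself (set_state); it now lives in a side table, the pointer-keyed ExtensionStates HashMap, keeping the process-wide extension list untouched during installation. The walk itself is a three-color DFS over the dependency graph: UNVISITED becomes VISITED on entry, meeting VISITED again means a back edge (circular dependency), and INSTALLED marks completion. A compact sketch using the standard library in place of V8's HashMap:

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <vector>

    enum State { UNVISITED, VISITED, INSTALLED };

    struct Extension {
      const char* name;
      std::vector<Extension*> dependencies;
    };

    // Side table keyed by extension pointer; the extensions themselves
    // are never written during the traversal.
    typedef std::map<const Extension*, State> ExtensionStates;

    bool Install(const Extension* ext, ExtensionStates* states) {
      State& state = (*states)[ext];  // defaults to UNVISITED (0)
      if (state == INSTALLED) return true;
      if (state == VISITED) {  // re-entered before finishing: a cycle
        std::fprintf(stderr, "Circular extension dependency\n");
        return false;
      }
      state = VISITED;
      for (std::size_t i = 0; i < ext->dependencies.size(); ++i) {
        if (!Install(ext->dependencies[i], states)) return false;
      }
      state = INSTALLED;
      return true;
    }

    int main() {
      Extension a, b;
      a.name = "a";
      b.name = "b";
      a.dependencies.push_back(&b);
      b.dependencies.push_back(&a);  // dependency cycle: must fail
      ExtensionStates states;
      return Install(&a, &states) ? 1 : 0;
    }
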
 
@@ -1967,7 +2095,9 @@
     builtins->set_javascript_builtin(id, *function);
     Handle<SharedFunctionInfo> shared
         = Handle<SharedFunctionInfo>(function->shared());
-    if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+    if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+      return false;
+    }
     // Set the code object on the function object.
     function->ReplaceCode(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
@@ -2047,7 +2177,7 @@
           break;
         }
         case CALLBACKS: {
-          LookupResult result;
+          LookupResult result(isolate());
           to->LocalLookup(descs->GetKey(i), &result);
           // If the property is already there we skip it
           if (result.IsProperty()) continue;
@@ -2085,7 +2215,7 @@
       if (properties->IsKey(raw_key)) {
         ASSERT(raw_key->IsString());
         // If the property is already there we skip it.
-        LookupResult result;
+        LookupResult result(isolate());
         to->LocalLookup(String::cast(raw_key), &result);
         if (result.IsProperty()) continue;
         // Set the property.
diff --git a/src/builtins.cc b/src/builtins.cc
index e6a0699..43cf358 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -33,6 +33,7 @@
 #include "builtins.h"
 #include "gdb-jit.h"
 #include "ic-inl.h"
+#include "mark-compact.h"
 #include "vm-state-inl.h"
 
 namespace v8 {
@@ -202,7 +203,7 @@
   }
 
   // 'array' now contains the JSArray we should initialize.
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   // Optimize the case where there is one argument and the argument is a
   // small smi.
@@ -215,7 +216,8 @@
         { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
           if (!maybe_obj->ToObject(&obj)) return maybe_obj;
         }
-        array->SetContent(FixedArray::cast(obj));
+        MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
+        if (maybe_obj->IsFailure()) return maybe_obj;
         return array;
       }
     }
@@ -239,6 +241,11 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
+  // Ensure the array can contain the elements, then set length and elements.
+  MaybeObject* maybe_object =
+      array->EnsureCanContainElements(FixedArray::cast(obj));
+  if (maybe_object->IsFailure()) return maybe_object;
+
   AssertNoAllocation no_gc;
   FixedArray* elms = FixedArray::cast(obj);
   WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -247,7 +254,6 @@
     elms->set(index, args[index+1], mode);
   }
 
-  // Set length and elements on the array.
   array->set_elements(FixedArray::cast(obj));
   array->set_length(len);
 
@@ -295,6 +301,7 @@
   if (mode == UPDATE_WRITE_BARRIER) {
     heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
   }
+  heap->incremental_marking()->RecordWrites(dst);
 }
 
 
@@ -313,6 +320,7 @@
   if (mode == UPDATE_WRITE_BARRIER) {
     heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
   }
+  heap->incremental_marking()->RecordWrites(dst);
 }
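
Both bulk-copy helpers now end with heap->incremental_marking()->RecordWrites(dst) on top of the generational RecordWrites. Under incremental marking (new in this merge; note src/incremental-marking.cc in the makefile), an already-scanned (black) object that receives new pointers must be revisited, or the marker could miss, and the collector free, the copied-in targets; re-recording the whole destination object is the coarse form of that barrier. A sketch of the invariant, not of V8's actual marking deque:

    #include <vector>

    enum Color { WHITE, GREY, BLACK };

    struct Object { Color color; };

    // If dst was already scanned (black), re-grey it and queue it so
    // the marker revisits the pointers the bulk copy just wrote.
    struct Marker {
      std::vector<Object*> worklist;  // grey objects awaiting a scan

      void RecordWrites(Object* dst) {
        if (dst->color == BLACK) {
          dst->color = GREY;
          worklist.push_back(dst);
        }
      }
    };

    int main() {
      Object o = { BLACK };
      Marker m;
      m.RecordWrites(&o);  // bulk write into a black object
      return (o.color == GREY && m.worklist.size() == 1) ? 0 : 1;
    }
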
 
 
@@ -358,6 +366,14 @@
   former_start[to_trim] = heap->fixed_array_map();
   former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
 
+  // Maintain marking consistency for HeapObjectIterator and
+  // IncrementalMarking.
+  int size_delta = to_trim * kPointerSize;
+  if (heap->marking()->TransferMark(elms->address(),
+                                    elms->address() + size_delta)) {
+    MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
+  }
+
   return FixedArray::cast(HeapObject::FromAddress(
       elms->address() + to_trim * kPointerSize));
 }
@@ -369,9 +385,6 @@
   // This method depends on the non-writability of Object and Array
   // prototype fields.
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
-  // Hidden prototype
-  array_proto = JSObject::cast(array_proto->GetPrototype());
-  ASSERT(array_proto->elements() == heap->empty_fixed_array());
   // Object.prototype
   Object* proto = array_proto->GetPrototype();
   if (proto == heap->null_value()) return false;
@@ -384,20 +397,42 @@
 
 MUST_USE_RESULT
 static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
-    Heap* heap, Object* receiver) {
+    Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
   if (!receiver->IsJSArray()) return NULL;
   JSArray* array = JSArray::cast(receiver);
   HeapObject* elms = array->elements();
-  if (elms->map() == heap->fixed_array_map()) return elms;
-  if (elms->map() == heap->fixed_cow_array_map()) {
-    return array->EnsureWritableFastElements();
+  Map* map = elms->map();
+  if (map == heap->fixed_array_map()) {
+    if (args == NULL || !array->HasFastSmiOnlyElements()) {
+      return elms;
+    }
+  } else if (map == heap->fixed_cow_array_map()) {
+    MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
+    if (args == NULL || !array->HasFastSmiOnlyElements() ||
+        maybe_writable_result->IsFailure()) {
+      return maybe_writable_result;
+    }
+  } else {
+    return NULL;
   }
-  return NULL;
+
+  // Need to ensure that the arguments passed in args can be contained in
+  // the array.
+  int args_length = args->length();
+  if (first_added_arg >= args_length) return array->elements();
+
+  MaybeObject* maybe_array = array->EnsureCanContainElements(
+      args,
+      first_added_arg,
+      args_length - first_added_arg);
+  if (maybe_array->IsFailure()) return maybe_array;
+  return array->elements();
 }
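
EnsureJSArrayWithWritableFastElements picks up a second job here: besides unsharing copy-on-write backing stores, it checks whether the arguments about to be stored fit the array's elements kind. An array in the specialized smi-only representation must transition to the general fast kind before a heap object is written into it, which can allocate, hence the MaybeObject plumbing through EnsureCanContainElements. A minimal model of that guard, with made-up names:

    #include <cstddef>
    #include <vector>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    struct Value { bool is_smi; };

    struct ArrayModel {
      ElementsKind kind;

      // Widen the representation before storing any non-Smi value;
      // mirrors the spirit of EnsureCanContainElements (minus the
      // allocation failure handling).
      void EnsureCanContainElements(const std::vector<Value>& values) {
        if (kind != FAST_SMI_ONLY_ELEMENTS) return;
        for (std::size_t i = 0; i < values.size(); ++i) {
          if (!values[i].is_smi) {
            kind = FAST_ELEMENTS;  // one-way SMI_ONLY -> FAST transition
            return;
          }
        }
      }
    };

    int main() {
      ArrayModel a = { FAST_SMI_ONLY_ELEMENTS };
      std::vector<Value> values(1);
      values[0].is_smi = false;  // a heap object forces the transition
      a.EnsureCanContainElements(values);
      return a.kind == FAST_ELEMENTS ? 0 : 1;
    }
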
 
 
 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
                                                      JSArray* receiver) {
+  if (!FLAG_clever_optimizations) return false;
   Context* global_context = heap->isolate()->context()->global_context();
   JSObject* array_proto =
       JSObject::cast(global_context->array_function()->prototype());
@@ -413,20 +448,18 @@
   HandleScope handleScope(isolate);
 
   Handle<Object> js_builtin =
-      GetProperty(Handle<JSObject>(
-          isolate->global_context()->builtins()),
-          name);
-  ASSERT(js_builtin->IsJSFunction());
-  Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
-  ScopedVector<Object**> argv(args.length() - 1);
-  int n_args = args.length() - 1;
-  for (int i = 0; i < n_args; i++) {
-    argv[i] = args.at<Object>(i + 1).location();
+      GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
+                  name);
+  Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
+  int argc = args.length() - 1;
+  ScopedVector<Handle<Object> > argv(argc);
+  for (int i = 0; i < argc; ++i) {
+    argv[i] = args.at<Object>(i + 1);
   }
-  bool pending_exception = false;
+  bool pending_exception;
   Handle<Object> result = Execution::Call(function,
                                           args.receiver(),
-                                          n_args,
+                                          argc,
                                           argv.start(),
                                           &pending_exception);
   if (pending_exception) return Failure::Exception();
@@ -439,7 +472,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
     if (maybe_elms_obj == NULL) {
       return CallJsBuiltin(isolate, "ArrayPush", args);
     }
@@ -475,7 +508,6 @@
     FillWithHoles(heap, new_elms, new_length, capacity);
 
     elms = new_elms;
-    array->set_elements(elms);
   }
 
   // Add the provided values.
@@ -485,6 +517,10 @@
     elms->set(index + len, args[index + 1], mode);
   }
 
+  if (elms != array->elements()) {
+    array->set_elements(elms);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
   return Smi::FromInt(new_length);
@@ -496,7 +532,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
     if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
   }
@@ -529,7 +565,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArrayShift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -539,7 +575,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -551,9 +587,7 @@
   }
 
   if (!heap->lo_space()->Contains(elms)) {
-    // As elms still in the same space they used to be,
-    // there is no need to update region dirty mark.
-    array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
+    array->set_elements(LeftTrimFixedArray(heap, elms, 1));
   } else {
     // Shift the elements.
     AssertNoAllocation no_gc;
@@ -573,7 +607,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArrayUnshift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -583,7 +617,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
@@ -592,6 +626,10 @@
   // we should never hit this case.
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
+  MaybeObject* maybe_object =
+      array->EnsureCanContainElements(&args, 1, to_add);
+  if (maybe_object->IsFailure()) return maybe_object;
+
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
@@ -600,13 +638,11 @@
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* new_elms = FixedArray::cast(obj);
-
     AssertNoAllocation no_gc;
     if (len > 0) {
       CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
     }
     FillWithHoles(heap, new_elms, new_length, capacity);
-
     elms = new_elms;
     array->set_elements(elms);
   } else {
@@ -634,7 +670,7 @@
   int len = -1;
   if (receiver->IsJSArray()) {
     JSArray* array = JSArray::cast(receiver);
-    if (!array->HasFastElements() ||
+    if (!array->HasFastTypeElements() ||
         !IsJSArrayFastElementMovingAllowed(heap, array)) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -650,7 +686,7 @@
     bool is_arguments_object_with_fast_elements =
         receiver->IsJSObject()
         && JSObject::cast(receiver)->map() == arguments_map
-        && JSObject::cast(receiver)->HasFastElements();
+        && JSObject::cast(receiver)->HasFastTypeElements();
     if (!is_arguments_object_with_fast_elements) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -721,6 +757,10 @@
   }
   FixedArray* result_elms = FixedArray::cast(result);
 
+  MaybeObject* maybe_object =
+      result_array->EnsureCanContainElements(result_elms);
+  if (maybe_object->IsFailure()) return maybe_object;
+
   AssertNoAllocation no_gc;
   CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
 
@@ -729,6 +769,14 @@
 
   // Set the length.
   result_array->set_length(Smi::FromInt(result_len));
+
+  // Set the ElementsKind.
+  ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
+  if (IsMoreGeneralElementsKindTransition(result_array->GetElementsKind(),
+                                          elements_kind)) {
+    MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
+    if (maybe->IsFailure()) return maybe;
+  }
   return result_array;
 }
 
@@ -738,7 +786,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver);
+        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArraySplice", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -748,7 +796,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
 
@@ -822,12 +870,20 @@
 
     // Set the length.
     result_array->set_length(Smi::FromInt(actual_delete_count));
+
+    // Set the ElementsKind.
+    ElementsKind elements_kind = array->GetElementsKind();
+    if (IsMoreGeneralElementsKindTransition(result_array->GetElementsKind(),
+                                            elements_kind)) {
+      MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
+      if (maybe->IsFailure()) return maybe;
+    }
   }
 
   int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
-
   int new_length = len - actual_delete_count + item_count;
 
+  bool elms_changed = false;
   if (item_count < actual_delete_count) {
     // Shrink the array.
     const bool trim_array = !heap->lo_space()->Contains(elms) &&
@@ -842,7 +898,8 @@
       }
 
       elms = LeftTrimFixedArray(heap, elms, delta);
-      array->set_elements(elms, SKIP_WRITE_BARRIER);
+
+      elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -882,7 +939,7 @@
       FillWithHoles(heap, new_elms, new_length, capacity);
 
       elms = new_elms;
-      array->set_elements(elms);
+      elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -898,6 +955,10 @@
     elms->set(k, args[3 + k - actual_start], mode);
   }
 
+  if (elms_changed) {
+    array->set_elements(elms);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
 
@@ -920,7 +981,7 @@
   int result_len = 0;
   for (int i = 0; i < n_arguments; i++) {
     Object* arg = args[i];
-    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
+    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
         || JSArray::cast(arg)->GetPrototype() != array_proto) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
@@ -956,6 +1017,17 @@
   }
   FixedArray* result_elms = FixedArray::cast(result);
 
+  // Ensure element type transitions happen before copying elements in.
+  if (result_array->HasFastSmiOnlyElements()) {
+    for (int i = 0; i < n_arguments; i++) {
+      JSArray* array = JSArray::cast(args[i]);
+      if (!array->HasFastSmiOnlyElements()) {
+        result_array->EnsureCanContainNonSmiElements();
+        break;
+      }
+    }
+  }
+
   // Copy data.
   AssertNoAllocation no_gc;
   int start_pos = 0;
@@ -1448,6 +1520,14 @@
   KeyedStoreIC::GenerateNonStrictArguments(masm);
 }
 
+static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
+}
+
+static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
+}
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateLoadICDebugBreak(masm);
@@ -1479,8 +1559,8 @@
 }
 
 
-static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateStubNoRegistersDebugBreak(masm);
+static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateCallFunctionStubDebugBreak(masm);
 }
 
 
@@ -1607,20 +1687,22 @@
   const BuiltinDesc* functions = BuiltinFunctionTable::functions();
 
   // For now we generate builtin adaptor code into a stack-allocated
-  // buffer, before copying it into individual code objects.
-  byte buffer[4*KB];
+  // buffer, before copying it into individual code objects. Be careful
+  // with alignment; some platforms don't like unaligned code.
+  union { int force_alignment; byte buffer[4*KB]; } u;
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(isolate, buffer, sizeof buffer);
+      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
       // Generate the code/adaptor.
       typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
       Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
       // We pass all arguments to the generator, but it may not use all of
       // them.  This works because the first arguments are on top of the
       // stack.
+      ASSERT(!masm.has_frame());
       g(&masm, functions[i].name, functions[i].extra_args);
       // Move the code into the object heap.
       CodeDesc desc;
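
The array builtins above are now elements-kind aware: EnsureJSArrayWithWritableFastElements receives the call arguments so that EnsureCanContainElements can transition the backing store before any value is written, and ArraySlice/ArraySplice transition the result array to the more general elements kind of the receiver. A minimal JavaScript sketch of operations that exercise these paths (the kind names are internal V8 states, shown only in comments):

  var a = [1, 2, 3];   // starts out with FAST_SMI_ONLY_ELEMENTS
  a.push(4);           // still smi-only; no transition needed
  a.push(4.5);         // roughly: transitions to FAST_DOUBLE_ELEMENTS first
  a.push("x");         // transitions to FAST_ELEMENTS (tagged values)
  var b = a.slice(1);  // result adopts the more general kind of `a`

When the backing store must grow, it is resized to new_length + (new_length >> 1) + 16, i.e. 1.5x plus a little slack; pushing the 101st element onto a full store of 100 allocates room for 167.
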
diff --git a/src/builtins.h b/src/builtins.h
index 31090d3..3659f99 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -167,6 +167,10 @@
                                     kStrictMode)                        \
   V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC,       \
                                      Code::kNoExtraICState)             \
+  V(TransitionElementsSmiToDouble,  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED,           \
+                                      Code::kNoExtraICState)            \
                                                                         \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */             \
   V(FunctionCall,                   BUILTIN, UNINITIALIZED,             \
@@ -188,27 +192,27 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 // Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V)                                \
-  V(Return_DebugBreak,          BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(ConstructCall_DebugBreak,   BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(LoadIC_DebugBreak,          LOAD_IC, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(KeyedLoadIC_DebugBreak,     KEYED_LOAD_IC, DEBUG_BREAK,    \
-                                Code::kNoExtraICState)         \
-  V(StoreIC_DebugBreak,         STORE_IC, DEBUG_BREAK,         \
-                                Code::kNoExtraICState)         \
-  V(KeyedStoreIC_DebugBreak,    KEYED_STORE_IC, DEBUG_BREAK,   \
-                                Code::kNoExtraICState)         \
-  V(Slot_DebugBreak,            BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(PlainReturn_LiveEdit,       BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)         \
-  V(FrameDropper_LiveEdit,      BUILTIN, DEBUG_BREAK,          \
-                                Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V)                                 \
+  V(Return_DebugBreak,           BUILTIN, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)         \
+  V(ConstructCall_DebugBreak,    BUILTIN, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)         \
+  V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)         \
+  V(LoadIC_DebugBreak,           LOAD_IC, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)         \
+  V(KeyedLoadIC_DebugBreak,      KEYED_LOAD_IC, DEBUG_BREAK,    \
+                                 Code::kNoExtraICState)         \
+  V(StoreIC_DebugBreak,          STORE_IC, DEBUG_BREAK,         \
+                                 Code::kNoExtraICState)         \
+  V(KeyedStoreIC_DebugBreak,     KEYED_STORE_IC, DEBUG_BREAK,   \
+                                 Code::kNoExtraICState)         \
+  V(Slot_DebugBreak,             BUILTIN, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)         \
+  V(PlainReturn_LiveEdit,        BUILTIN, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)         \
+  V(FrameDropper_LiveEdit,       BUILTIN, DEBUG_BREAK,          \
+                                 Code::kNoExtraICState)
 #else
 #define BUILTIN_LIST_DEBUG_A(V)
 #endif
@@ -234,7 +238,6 @@
   V(DELETE, 2)                           \
   V(IN, 1)                               \
   V(INSTANCE_OF, 1)                      \
-  V(GET_KEYS, 0)                         \
   V(FILTER_KEY, 1)                       \
   V(CALL_NON_FUNCTION, 0)                \
   V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index 93218ea..b13efb3 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,12 +33,12 @@
 namespace internal {
 
 
-static const int BYTECODE_MASK = 0xff;
+const int BYTECODE_MASK = 0xff;
 // The first argument is packed in with the byte code in one word; it
 // has 24 bits, but since it can be positive or negative only 23 bits are
 // used for positive values.
-static const unsigned int MAX_FIRST_ARG = 0x7fffffu;
-static const int BYTECODE_SHIFT = 8;
+const unsigned int MAX_FIRST_ARG = 0x7fffffu;
+const int BYTECODE_SHIFT = 8;
 
 #define BYTECODE_ITERATOR(V)                                                   \
 V(BREAK,              0, 4)   /* bc8                                        */ \
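
BYTECODE_MASK and BYTECODE_SHIFT describe how each Irregexp bytecode shares a word with its first argument: the low 8 bits hold the bytecode and the upper bits hold the argument (23 usable bits for positive values, as noted above). A quick JavaScript sketch of the packing:

  var BYTECODE_MASK = 0xff;
  var BYTECODE_SHIFT = 8;
  var word = (42 << BYTECODE_SHIFT) | 7;  // first argument 42, bytecode 7
  var bc  = word & BYTECODE_MASK;         // 7: the bytecode
  var arg = word >> BYTECODE_SHIFT;       // 42: the first argument
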
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index 30a67a6..9241d26 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -134,14 +134,12 @@
 };
 
 static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
+static const int kCachedPowersOffset = 348;  // -1 * the first decimal_exponent.
 static const double kD_1_LOG2_10 = 0.30102999566398114;  //  1 / lg(10)
-const int PowersOfTenCache::kDecimalExponentDistance =
-    kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMinDecimalExponent =
-    kCachedPowers[0].decimal_exponent;
-const int PowersOfTenCache::kMaxDecimalExponent =
-    kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
+// Difference between the decimal exponents in the table above.
+const int PowersOfTenCache::kDecimalExponentDistance = 8;
+const int PowersOfTenCache::kMinDecimalExponent = -348;
+const int PowersOfTenCache::kMaxDecimalExponent = 340;
 
 void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
     int min_exponent,
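
The table-derived constants are replaced by hard-coded literals, presumably so they no longer require run-time static initialization. The values stay mutually consistent, which a short sketch can verify (assuming the table spans the full exponent range at the stated spacing):

  var kMinDecimalExponent = -348;
  var kMaxDecimalExponent = 340;
  var kDecimalExponentDistance = 8;
  var kCachedPowersOffset = -kMinDecimalExponent;  // 348, matching the literal
  var entries = (kMaxDecimalExponent - kMinDecimalExponent) /
                kDecimalExponentDistance + 1;      // 87 cached powers
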
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
index 0dfc80d..1a89ef3 100644
--- a/src/char-predicates-inl.h
+++ b/src/char-predicates-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,7 +52,7 @@
 }
 
 
-static inline bool IsInRange(int value, int lower_limit, int higher_limit) {
+inline bool IsInRange(int value, int lower_limit, int higher_limit) {
   ASSERT(lower_limit <= higher_limit);
   return static_cast<unsigned int>(value - lower_limit) <=
       static_cast<unsigned int>(higher_limit - lower_limit);
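
IsInRange relies on the classic unsigned-comparison trick: once value - lower_limit is treated as unsigned, a single comparison checks both bounds, because values below the lower limit wrap around to very large unsigned numbers. The same idea in JavaScript, with >>> 0 standing in for the unsigned cast:

  function isInRange(value, lowerLimit, higherLimit) {
    return ((value - lowerLimit) >>> 0) <= ((higherLimit - lowerLimit) >>> 0);
  }
  isInRange(0x41, 0x41, 0x5A);  // true:  'A' lies within 'A'..'Z'
  isInRange(0x20, 0x41, 0x5A);  // false: 0x20 - 0x41 wraps to a huge value
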
diff --git a/src/checks.h b/src/checks.h
index 2f359f6..8608b0e 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,10 +52,10 @@
 
 
 // Used by the CHECK macro -- should not be called directly.
-static inline void CheckHelper(const char* file,
-                               int line,
-                               const char* source,
-                               bool condition) {
+inline void CheckHelper(const char* file,
+                        int line,
+                        const char* source,
+                        bool condition) {
   if (!condition)
     V8_Fatal(file, line, "CHECK(%s) failed", source);
 }
@@ -63,14 +63,16 @@
 
 // The CHECK macro checks that the given condition is true; if not, it
 // prints a message to stderr and aborts.
-#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
+#define CHECK(condition) do {                                             \
+    if (!(condition)) CheckHelper(__FILE__, __LINE__, #condition, false); \
+  } while (0)
 
 
 // Helper function used by the CHECK_EQ function when given int
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source, int expected,
-                                     const char* value_source, int value) {
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source, int expected,
+                              const char* value_source, int value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
@@ -81,11 +83,11 @@
 
 // Helper function used by the CHECK_EQ function when given int64_t
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     int64_t expected,
-                                     const char* value_source,
-                                     int64_t value) {
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source,
+                              int64_t expected,
+                              const char* value_source,
+                              int64_t value) {
   if (expected != value) {
     // Print int64_t values in hex, as two int32s,
     // to avoid platform dependencies.
@@ -103,12 +105,12 @@
 
 // Helper function used by the CHECK_NE function when given int
 // arguments.  Should not be called directly.
-static inline void CheckNonEqualsHelper(const char* file,
-                                        int line,
-                                        const char* unexpected_source,
-                                        int unexpected,
-                                        const char* value_source,
-                                        int value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* unexpected_source,
+                                 int unexpected,
+                                 const char* value_source,
+                                 int value) {
   if (unexpected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
              unexpected_source, value_source, value);
@@ -118,12 +120,12 @@
 
 // Helper function used by the CHECK function when given string
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     const char* expected,
-                                     const char* value_source,
-                                     const char* value) {
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const char* expected,
+                              const char* value_source,
+                              const char* value) {
   if ((expected == NULL && value != NULL) ||
       (expected != NULL && value == NULL) ||
       (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
@@ -134,12 +136,12 @@
 }
 
 
-static inline void CheckNonEqualsHelper(const char* file,
-                                        int line,
-                                        const char* expected_source,
-                                        const char* expected,
-                                        const char* value_source,
-                                        const char* value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const char* expected,
+                                 const char* value_source,
+                                 const char* value) {
   if (expected == value ||
       (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
@@ -150,12 +152,12 @@
 
 // Helper function used by the CHECK function when given pointer
 // arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     const void* expected,
-                                     const char* value_source,
-                                     const void* value) {
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const void* expected,
+                              const char* value_source,
+                              const void* value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
@@ -165,12 +167,12 @@
 }
 
 
-static inline void CheckNonEqualsHelper(const char* file,
-                                        int line,
-                                        const char* expected_source,
-                                        const void* expected,
-                                        const char* value_source,
-                                        const void* value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const void* expected,
+                                 const char* value_source,
+                                 const void* value) {
   if (expected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
              expected_source, value_source, value);
@@ -180,12 +182,12 @@
 
 // Helper function used by the CHECK function when given floating
 // point arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     double expected,
-                                     const char* value_source,
-                                     double value) {
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              double expected,
+                              const char* value_source,
+                              double value) {
   // Force values to 64 bit memory to truncate 80 bit precision on IA32.
   volatile double* exp = new double[1];
   *exp = expected;
@@ -201,12 +203,12 @@
 }
 
 
-static inline void CheckNonEqualsHelper(const char* file,
-                                     int line,
-                                     const char* expected_source,
-                                     double expected,
-                                     const char* value_source,
-                                     double value) {
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 double expected,
+                                 const char* value_source,
+                                 double value) {
   // Force values to 64 bit memory to truncate 80 bit precision on IA32.
   volatile double* exp = new double[1];
   *exp = expected;
@@ -257,11 +259,8 @@
     SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
 
 
-namespace v8 { namespace internal {
+extern bool FLAG_enable_slow_asserts;
 
-bool EnableSlowAsserts();
-
-} }  // namespace v8::internal
 
 // The ASSERT macro is equivalent to CHECK except that it only
 // generates code in debug builds.
@@ -273,7 +272,7 @@
 #define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
 #define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
 #define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
+#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
 #else
 #define ASSERT_RESULT(expr)     (expr)
 #define ASSERT(condition)      ((void) 0)
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 724445e..ba7df80 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -40,7 +40,7 @@
 bool CodeStub::FindCodeInCache(Code** code_out) {
   Heap* heap = Isolate::Current()->heap();
   int index = heap->code_stubs()->FindEntry(GetKey());
-  if (index != UnseededNumberDictionary::kNotFound) {
+  if (index != NumberDictionary::kNotFound) {
     *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
     return true;
   }
@@ -52,11 +52,12 @@
   // Update the static counter each time a new code stub is generated.
   masm->isolate()->counters()->code_stubs()->Increment();
 
-  // Nested stubs are not allowed for leafs.
-  AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
+  // Nested stubs are not allowed for leaves.
+  AllowStubCallsScope allow_scope(masm, false);
 
   // Generate the code for the stub.
   masm->set_generating_stub(true);
+  NoCurrentFrameScope scope(masm);
   Generate(masm);
 }
 
@@ -118,17 +119,19 @@
     Handle<Code> new_object = factory->NewCode(
         desc, flags, masm.CodeObject(), NeedsImmovableCode());
     RecordCodeGeneration(*new_object, &masm);
-    FinishCode(*new_object);
+    FinishCode(new_object);
 
     // Update the dictionary and the root in Heap.
-    Handle<UnseededNumberDictionary> dict =
+    Handle<NumberDictionary> dict =
         factory->DictionaryAtNumberPut(
-            Handle<UnseededNumberDictionary>(heap->code_stubs()),
+            Handle<NumberDictionary>(heap->code_stubs()),
             GetKey(),
             new_object);
     heap->public_set_code_stubs(*dict);
-
     code = *new_object;
+    Activate(code);
+  } else {
+    CHECK(IsPregenerated() == code->is_pregenerated());
   }
 
   ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@@ -136,43 +139,6 @@
 }
 
 
-MaybeObject* CodeStub::TryGetCode() {
-  Code* code;
-  if (!FindCodeInCache(&code)) {
-    // Generate the new code.
-    MacroAssembler masm(Isolate::Current(), NULL, 256);
-    GenerateCode(&masm);
-    Heap* heap = masm.isolate()->heap();
-
-    // Create the code object.
-    CodeDesc desc;
-    masm.GetCode(&desc);
-
-    // Try to copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(
-        static_cast<Code::Kind>(GetCodeKind()),
-        GetICState());
-    Object* new_object;
-    { MaybeObject* maybe_new_object =
-          heap->CreateCode(desc, flags, masm.CodeObject());
-      if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
-    }
-    code = Code::cast(new_object);
-    RecordCodeGeneration(code, &masm);
-    FinishCode(code);
-
-    // Try to update the code cache but do not fail if unable.
-    MaybeObject* maybe_new_object =
-        heap->code_stubs()->AtNumberPut(GetKey(), code);
-    if (maybe_new_object->ToObject(&new_object)) {
-      heap->public_set_code_stubs(UnseededNumberDictionary::cast(new_object));
-    }
-  }
-
-  return code;
-}
-
-
 const char* CodeStub::MajorName(CodeStub::Major major_key,
                                 bool allow_unknown_keys) {
   switch (major_key) {
@@ -188,6 +154,11 @@
 }
 
 
+void CodeStub::PrintName(StringStream* stream) {
+  stream->Add("%s", MajorName(MajorKey(), false));
+}
+
+
 int ICCompareStub::MinorKey() {
   return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
 }
@@ -242,9 +213,18 @@
 }
 
 
+void JSEntryStub::FinishCode(Handle<Code> code) {
+  Handle<FixedArray> handler_table =
+      code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
+  handler_table->set(0, Smi::FromInt(handler_offset_));
+  code->set_handler_table(*handler_table);
+}
+
+
 void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
       KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
       break;
     case FAST_DOUBLE_ELEMENTS:
@@ -274,7 +254,11 @@
 void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
-      KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+    case FAST_SMI_ONLY_ELEMENTS: {
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+                                                       is_js_array_,
+                                                       elements_kind_);
+    }
       break;
     case FAST_DOUBLE_ELEMENTS:
       KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@@ -302,24 +286,20 @@
 
 
 void ArgumentsAccessStub::PrintName(StringStream* stream) {
-  const char* type_name = NULL;  // Make g++ happy.
+  stream->Add("ArgumentsAccessStub_");
   switch (type_) {
-    case READ_ELEMENT: type_name = "ReadElement"; break;
-    case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
-    case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
-    case NEW_STRICT: type_name = "NewStrict"; break;
+    case READ_ELEMENT: stream->Add("ReadElement"); break;
+    case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
+    case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+    case NEW_STRICT: stream->Add("NewStrict"); break;
   }
-  stream->Add("ArgumentsAccessStub_%s", type_name);
 }
 
 
 void CallFunctionStub::PrintName(StringStream* stream) {
-  const char* flags_name = NULL;  // Make g++ happy.
-  switch (flags_) {
-    case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
-    case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
-  }
-  stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
+  stream->Add("CallFunctionStub_Args%d", argc_);
+  if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
+  if (RecordCallTarget()) stream->Add("_Recording");
 }
 
 
@@ -402,4 +382,29 @@
 }
 
 
+void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
+  Label fail;
+  if (!FLAG_trace_elements_transitions) {
+    if (to_ == FAST_ELEMENTS) {
+      if (from_ == FAST_SMI_ONLY_ELEMENTS) {
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+      } else if (from_ == FAST_DOUBLE_ELEMENTS) {
+        ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+      } else {
+        UNREACHABLE();
+      }
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+                                                       is_jsarray_,
+                                                       FAST_ELEMENTS);
+    } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
+      ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+      KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_);
+    } else {
+      UNREACHABLE();
+    }
+  }
+  masm->bind(&fail);
+  KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
+}
+
 } }  // namespace v8::internal
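
ElementsTransitionAndStoreStub::Generate handles exactly three transitions (smi-only to object, double to object, and smi-only to double); any other pair is unreachable, and when --trace-elements-transitions is on the stub simply falls through to the runtime. Roughly, the keyed stores that trigger each path look like this in JavaScript:

  var a = [1, 2];  // smi-only elements
  a[0] = 1.5;      // smi-only -> double  (GenerateSmiOnlyToDouble)
  a[1] = {};       // double   -> object  (GenerateDoubleToObject)

  var b = [3, 4];  // smi-only elements
  b[0] = "s";      // smi-only -> object  (GenerateSmiOnlyToObject)
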
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 64c89b9..6bda5da 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -30,6 +30,7 @@
 
 #include "allocation.h"
 #include "globals.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -45,27 +46,22 @@
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
+  V(RecordWrite)                         \
+  V(StoreBufferOverflow)                 \
+  V(RegExpExec)                          \
   V(TranscendentalCache)                 \
   V(Instanceof)                          \
-  /* All stubs above this line only exist in a few versions, which are  */  \
-  /* generated ahead of time.  Therefore compiling a call to one of     */  \
-  /* them can't cause a new stub to be compiled, so compiling a call to */  \
-  /* them is GC safe.  The ones below this line exist in many variants  */  \
-  /* so code compiling a call to one can cause a GC.  This means they   */  \
-  /* can't be called from other stubs, since stub generation code is    */  \
-  /* not GC safe.                                                       */  \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
+  V(FastNewBlockContext)                 \
   V(FastCloneShallowArray)               \
-  V(RevertToNumber)                      \
+  V(FastCloneShallowObject)              \
   V(ToBoolean)                           \
   V(ToNumber)                            \
-  V(CounterOp)                           \
   V(ArgumentsAccess)                     \
-  V(RegExpExec)                          \
   V(RegExpConstructResult)               \
   V(NumberToString)                      \
   V(CEntry)                              \
@@ -73,7 +69,9 @@
   V(KeyedLoadElement)                    \
   V(KeyedStoreElement)                   \
   V(DebuggerStatement)                   \
-  V(StringDictionaryNegativeLookup)
+  V(StringDictionaryLookup)              \
+  V(ElementsTransitionAndStore)          \
+  V(StoreArrayLiteralElement)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -121,11 +119,6 @@
   // Retrieve the code for the stub. Generate the code if needed.
   Handle<Code> GetCode();
 
-  // Retrieve the code for the stub if already generated.  Do not
-  // generate the code if not already generated and instead return a
-  // retry after GC Failure object.
-  MUST_USE_RESULT MaybeObject* TryGetCode();
-
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
   }
@@ -142,14 +135,35 @@
 
   virtual ~CodeStub() {}
 
+  bool CompilingCallsToThisStubIsGCSafe() {
+    bool is_pregenerated = IsPregenerated();
+    Code* code = NULL;
+    CHECK(!is_pregenerated || FindCodeInCache(&code));
+    return is_pregenerated;
+  }
+
+  // See comment above, where Instanceof is defined.
+  virtual bool IsPregenerated() { return false; }
+
+  static void GenerateStubsAheadOfTime();
+  static void GenerateFPStubs();
+
+  // Some stubs put untagged junk on the stack that cannot be scanned by the
+  // GC.  This means that we must be statically sure that no GC can occur while
+  // they are running.  If that is the case they should override this to return
+  // false, which will cause an assertion if we try to call something that can
+  // GC or if we try to put a stack frame on top of the junk, which would not
+  // result in a traversable stack.
+  virtual bool SometimesSetsUpAFrame() { return true; }
+
+  // Lookup the code in the (possibly custom) cache.
+  bool FindCodeInCache(Code** code_out);
+
  protected:
   static const int kMajorBits = 6;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
-  // Lookup the code in the (possibly custom) cache.
-  bool FindCodeInCache(Code** code_out);
-
   // Nonvirtual wrapper around the stub-specific Generate function.  Call
   // this function to set up the macro assembler and generate the code.
   void GenerateCode(MacroAssembler* masm);
@@ -162,7 +176,11 @@
   void RecordCodeGeneration(Code* code, MacroAssembler* masm);
 
   // Finish the code object after it has been generated.
-  virtual void FinishCode(Code* code) { }
+  virtual void FinishCode(Handle<Code> code) { }
+
+  // Activate the newly generated stub. Called after the stub has been
+  // registered in the stub cache.
+  virtual void Activate(Code* code) { }
 
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
@@ -178,9 +196,7 @@
 
   // Returns a name for logging/debugging purposes.
   SmartArrayPointer<const char> GetName();
-  virtual void PrintName(StringStream* stream) {
-    stream->Add("%s", MajorName(MajorKey(), false));
-  }
+  virtual void PrintName(StringStream* stream);
 
   // Returns whether the code generated for this stub needs to be allocated as
   // a fixed (non-moveable) code object.
@@ -193,9 +209,6 @@
            MajorKeyBits::encode(MajorKey());
   }
 
-  // See comment above, where Instanceof is defined.
-  bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
-
   class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
   class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
 
@@ -286,16 +299,17 @@
 
 class FastNewClosureStub : public CodeStub {
  public:
-  explicit FastNewClosureStub(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
+  explicit FastNewClosureStub(LanguageMode language_mode)
+    : language_mode_(language_mode) { }
 
   void Generate(MacroAssembler* masm);
 
  private:
   Major MajorKey() { return FastNewClosure; }
-  int MinorKey() { return strict_mode_; }
+  int MinorKey() { return language_mode_ == CLASSIC_MODE
+        ? kNonStrictMode : kStrictMode; }
 
-  StrictModeFlag strict_mode_;
+  LanguageMode language_mode_;
 };
 
 
@@ -304,7 +318,7 @@
   static const int kMaximumSlots = 64;
 
   explicit FastNewContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
   }
 
   void Generate(MacroAssembler* masm);
@@ -317,6 +331,24 @@
 };
 
 
+class FastNewBlockContextStub : public CodeStub {
+ public:
+  static const int kMaximumSlots = 64;
+
+  explicit FastNewBlockContextStub(int slots) : slots_(slots) {
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int slots_;
+
+  Major MajorKey() { return FastNewBlockContext; }
+  int MinorKey() { return slots_; }
+};
+
+
 class FastCloneShallowArrayStub : public CodeStub {
  public:
   // Maximum length of copied elements array.
@@ -324,14 +356,16 @@
 
   enum Mode {
     CLONE_ELEMENTS,
-    COPY_ON_WRITE_ELEMENTS
+    CLONE_DOUBLE_ELEMENTS,
+    COPY_ON_WRITE_ELEMENTS,
+    CLONE_ANY_ELEMENTS
   };
 
   FastCloneShallowArrayStub(Mode mode, int length)
       : mode_(mode),
         length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
-    ASSERT(length_ >= 0);
-    ASSERT(length_ <= kMaximumClonedLength);
+    ASSERT_GE(length_, 0);
+    ASSERT_LE(length_, kMaximumClonedLength);
   }
 
   void Generate(MacroAssembler* masm);
@@ -342,12 +376,32 @@
 
   Major MajorKey() { return FastCloneShallowArray; }
   int MinorKey() {
-    ASSERT(mode_ == 0 || mode_ == 1);
-    return (length_ << 1) | mode_;
+    ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
+    return length_ * 4 + mode_;
   }
 };
 
 
+class FastCloneShallowObjectStub : public CodeStub {
+ public:
+  // Maximum number of properties in copied object.
+  static const int kMaximumClonedProperties = 6;
+
+  explicit FastCloneShallowObjectStub(int length) : length_(length) {
+    ASSERT_GE(length_, 0);
+    ASSERT_LE(length_, kMaximumClonedProperties);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int length_;
+
+  Major MajorKey() { return FastCloneShallowObject; }
+  int MinorKey() { return length_; }
+};
+
+
 class InstanceofStub: public CodeStub {
  public:
   enum Flags {
@@ -410,7 +464,9 @@
   class OpField: public BitField<int, 0, 3> { };
   class StateField: public BitField<int, 3, 5> { };
 
-  virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
+  virtual void FinishCode(Handle<Code> code) {
+    code->set_compare_state(state_);
+  }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
   virtual int MinorKey();
@@ -513,7 +569,7 @@
   int MinorKey();
 
   virtual int GetCodeKind() { return Code::COMPARE_IC; }
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(CompareIC::GENERIC);
   }
 
@@ -531,11 +587,18 @@
 
 class CEntryStub : public CodeStub {
  public:
-  explicit CEntryStub(int result_size)
-      : result_size_(result_size), save_doubles_(false) { }
+  explicit CEntryStub(int result_size,
+                      SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+      : result_size_(result_size), save_doubles_(save_doubles) { }
 
   void Generate(MacroAssembler* masm);
-  void SaveDoubles() { save_doubles_ = true; }
+
+  // The version of this stub that doesn't save doubles is generated ahead of
+  // time, so it's OK to call it from other stubs that can't cope with GC during
+  // their code generation.  On machines that always have gp registers (x64) we
+  // can generate both variants ahead of time.
+  virtual bool IsPregenerated();
+  static void GenerateAheadOfTime();
 
  private:
   void GenerateCore(MacroAssembler* masm,
@@ -550,7 +613,7 @@
 
   // Number of pointers/values returned.
   const int result_size_;
-  bool save_doubles_;
+  SaveFPRegsMode save_doubles_;
 
   Major MajorKey() { return CEntry; }
   int MinorKey();
@@ -571,6 +634,10 @@
  private:
   Major MajorKey() { return JSEntry; }
   int MinorKey() { return 0; }
+
+  virtual void FinishCode(Handle<Code> code);
+
+  int handler_offset_;
 };
 
 
@@ -647,10 +714,32 @@
 
   void Generate(MacroAssembler* masm);
 
+  virtual void FinishCode(Handle<Code> code);
+
+  static void Clear(Heap* heap, Address address);
+
+  static Object* GetCachedValue(Address address);
+
   static int ExtractArgcFromMinorKey(int minor_key) {
     return ArgcBits::decode(minor_key);
   }
 
+  // The object that indicates an uninitialized cache.
+  static Handle<Object> UninitializedSentinel(Isolate* isolate) {
+    return isolate->factory()->the_hole_value();
+  }
+
+  // A raw version of the uninitialized sentinel that's safe to read during
+  // garbage collection (e.g., for patching the cache).
+  static Object* RawUninitializedSentinel(Heap* heap) {
+    return heap->raw_unchecked_the_hole_value();
+  }
+
+  // The object that indicates a megamorphic state.
+  static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
+    return isolate->factory()->undefined_value();
+  }
+
  private:
   int argc_;
   CallFunctionFlags flags_;
@@ -658,8 +747,8 @@
   virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
-  class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
-  class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
+  class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
+  class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() {
@@ -670,6 +759,10 @@
   bool ReceiverMightBeImplicit() {
     return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
   }
+
+  bool RecordCallTarget() {
+    return (flags_ & RECORD_CALL_TARGET) != 0;
+  }
 };
 
 
@@ -698,7 +791,6 @@
  public:
   StringCharCodeAtGenerator(Register object,
                             Register index,
-                            Register scratch,
                             Register result,
                             Label* receiver_not_string,
                             Label* index_not_number,
@@ -706,15 +798,11 @@
                             StringIndexFlags index_flags)
       : object_(object),
         index_(index),
-        scratch_(scratch),
         result_(result),
         receiver_not_string_(receiver_not_string),
         index_not_number_(index_not_number),
         index_out_of_range_(index_out_of_range),
         index_flags_(index_flags) {
-    ASSERT(!scratch_.is(object_));
-    ASSERT(!scratch_.is(index_));
-    ASSERT(!scratch_.is(result_));
     ASSERT(!result_.is(object_));
     ASSERT(!result_.is(index_));
   }
@@ -732,7 +820,6 @@
  private:
   Register object_;
   Register index_;
-  Register scratch_;
   Register result_;
 
   Label* receiver_not_string_;
@@ -795,8 +882,7 @@
  public:
   StringCharAtGenerator(Register object,
                         Register index,
-                        Register scratch1,
-                        Register scratch2,
+                        Register scratch,
                         Register result,
                         Label* receiver_not_string,
                         Label* index_not_number,
@@ -804,13 +890,12 @@
                         StringIndexFlags index_flags)
       : char_code_at_generator_(object,
                                 index,
-                                scratch1,
-                                scratch2,
+                                scratch,
                                 receiver_not_string,
                                 index_not_number,
                                 index_out_of_range,
                                 index_flags),
-        char_from_code_generator_(scratch2, result) {}
+        char_from_code_generator_(scratch, result) {}
 
   // Generates the fast case code. On the fallthrough path |result|
   // register contains the result.
@@ -934,11 +1019,13 @@
   virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
   virtual void PrintName(StringStream* stream);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   Major MajorKey() { return ToBoolean; }
   int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_to_boolean_state(types_.ToByte());
   }
 
@@ -952,6 +1039,56 @@
   Types types_;
 };
 
+
+class ElementsTransitionAndStoreStub : public CodeStub {
+ public:
+  ElementsTransitionAndStoreStub(ElementsKind from,
+                                 ElementsKind to,
+                                 bool is_jsarray,
+                                 StrictModeFlag strict_mode)
+      : from_(from),
+        to_(to),
+        is_jsarray_(is_jsarray),
+        strict_mode_(strict_mode) {}
+
+ private:
+  class FromBits:       public BitField<ElementsKind,    0, 8> {};
+  class ToBits:         public BitField<ElementsKind,    8, 8> {};
+  class IsJSArrayBits:  public BitField<bool,           16, 8> {};
+  class StrictModeBits: public BitField<StrictModeFlag, 24, 8> {};
+
+  Major MajorKey() { return ElementsTransitionAndStore; }
+  int MinorKey() {
+    return FromBits::encode(from_) |
+        ToBits::encode(to_) |
+        IsJSArrayBits::encode(is_jsarray_) |
+        StrictModeBits::encode(strict_mode_);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  ElementsKind from_;
+  ElementsKind to_;
+  bool is_jsarray_;
+  StrictModeFlag strict_mode_;
+
+  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+};
+
+
+class StoreArrayLiteralElementStub : public CodeStub {
+ public:
+  explicit StoreArrayLiteralElementStub() {}
+
+ private:
+  Major MajorKey() { return StoreArrayLiteralElement; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
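
ElementsTransitionAndStoreStub packs its four parameters into the 32-bit minor key, one byte per field, using V8's BitField helpers. A sketch of the equivalent packing arithmetic:

  function encodeMinorKey(from, to, isJSArray, strictMode) {
    return (from & 0xff)               |  // FromBits:       bits 0..7
           ((to & 0xff) << 8)          |  // ToBits:         bits 8..15
           ((isJSArray ? 1 : 0) << 16) |  // IsJSArrayBits:  bits 16..23
           ((strictMode & 0xff) << 24);   // StrictModeBits: bits 24..31
  }
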
diff --git a/src/codegen.cc b/src/codegen.cc
index cdc9ba1..ceea7b9 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -218,8 +218,8 @@
 
 
 int CEntryStub::MinorKey() {
+  int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
   ASSERT(result_size_ == 1 || result_size_ == 2);
-  int result = save_doubles_ ? 1 : 0;
 #ifdef _WIN64
   return result | ((result_size_ == 1) ? 0 : 2);
 #else
diff --git a/src/codegen.h b/src/codegen.h
index e551abf..5360d3e 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -81,4 +81,19 @@
 #error Unsupported target architecture.
 #endif
 
+namespace v8 {
+namespace internal {
+
+class ElementsTransitionGenerator : public AllStatic {
+ public:
+  static void GenerateSmiOnlyToObject(MacroAssembler* masm);
+  static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
+  static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
+};
+
+} }  // namespace v8::internal
+
 #endif  // V8_CODEGEN_H_
diff --git a/src/collection.js b/src/collection.js
new file mode 100644
index 0000000..d116126
--- /dev/null
+++ b/src/collection.js
@@ -0,0 +1,205 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+const $Set = global.Set;
+const $Map = global.Map;
+const $WeakMap = global.WeakMap;
+
+//-------------------------------------------------------------------
+
+// Global sentinel to be used instead of undefined keys, which are not
+// supported internally but required for Harmony sets and maps.
+var undefined_sentinel = {};
+
+
+function SetConstructor() {
+  if (%_IsConstructCall()) {
+    %SetInitialize(this);
+  } else {
+    return new $Set();
+  }
+}
+
+
+function SetAdd(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %SetAdd(this, key);
+}
+
+
+function SetHas(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %SetHas(this, key);
+}
+
+
+function SetDelete(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %SetDelete(this, key);
+}
+
+
+function MapConstructor() {
+  if (%_IsConstructCall()) {
+    %MapInitialize(this);
+  } else {
+    return new $Map();
+  }
+}
+
+
+function MapGet(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %MapGet(this, key);
+}
+
+
+function MapSet(key, value) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return %MapSet(this, key, value);
+}
+
+
+function MapHas(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  return !IS_UNDEFINED(%MapGet(this, key));
+}
+
+
+function MapDelete(key) {
+  if (IS_UNDEFINED(key)) {
+    key = undefined_sentinel;
+  }
+  if (!IS_UNDEFINED(%MapGet(this, key))) {
+    %MapSet(this, key, void 0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+function WeakMapConstructor() {
+  if (%_IsConstructCall()) {
+    %WeakMapInitialize(this);
+  } else {
+    return new $WeakMap();
+  }
+}
+
+
+function WeakMapGet(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return !IS_UNDEFINED(%WeakMapGet(this, key));
+}
+
+
+function WeakMapDelete(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
+    %WeakMapSet(this, key, void 0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// -------------------------------------------------------------------
+
+(function () {
+  %CheckIsBootstrapping();
+
+  // Set up the Set and Map constructor functions.
+  %SetCode($Set, SetConstructor);
+  %SetCode($Map, MapConstructor);
+
+  // Set up the constructor property on the Set and Map prototype objects.
+  %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+  %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the Set prototype object.
+  InstallFunctions($Set.prototype, DONT_ENUM, $Array(
+    "add", SetAdd,
+    "has", SetHas,
+    "delete", SetDelete
+  ));
+
+  // Set up the non-enumerable functions on the Map prototype object.
+  InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+    "get", MapGet,
+    "set", MapSet,
+    "has", MapHas,
+    "delete", MapDelete
+  ));
+
+  // Set up the WeakMap constructor function.
+  %SetCode($WeakMap, WeakMapConstructor);
+
+  // Set up the constructor property on the WeakMap prototype object.
+  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the WeakMap prototype object.
+  InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
+    "get", WeakMapGet,
+    "set", WeakMapSet,
+    "has", WeakMapHas,
+    "delete", WeakMapDelete
+  ));
+})();
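
Two consequences of the implementation above are worth noting: the sentinel makes undefined usable as a Set or Map key, but because MapHas and MapDelete compare the stored value against undefined, an entry whose value is undefined is indistinguishable from a missing one. A short usage sketch (these globals are only installed behind the --harmony flag):

  var s = new Set();
  s.add(undefined);     // stored as undefined_sentinel internally
  s.has(undefined);     // true

  var m = new Map();
  var key = {};
  m.set(key, undefined);
  m.has(key);           // false: the stored value is undefined
  m.delete(key);        // false, for the same reason

  var wm = new WeakMap();
  // wm.set("str", 1);  // would throw TypeError: WeakMap keys must be objects
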
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 28e833a..82cc223 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,6 +27,7 @@
 
 #include "v8.h"
 
+#include "assembler.h"
 #include "compilation-cache.h"
 #include "serialize.h"
 
@@ -250,7 +251,8 @@
 Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
     Handle<String> source,
     Handle<Context> context,
-    StrictModeFlag strict_mode) {
+    LanguageMode language_mode,
+    int scope_position) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -259,7 +261,8 @@
   { HandleScope scope(isolate());
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
-      result = table->LookupEval(*source, *context, strict_mode);
+      result = table->LookupEval(
+          *source, *context, language_mode, scope_position);
       if (result->IsSharedFunctionInfo()) {
         break;
       }
@@ -269,7 +272,7 @@
     Handle<SharedFunctionInfo>
         function_info(SharedFunctionInfo::cast(result), isolate());
     if (generation != 0) {
-      Put(source, context, function_info);
+      Put(source, context, function_info, scope_position);
     }
     isolate()->counters()->compilation_cache_hits()->Increment();
     return function_info;
@@ -283,27 +286,31 @@
 MaybeObject* CompilationCacheEval::TryTablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<SharedFunctionInfo> function_info) {
+    Handle<SharedFunctionInfo> function_info,
+    int scope_position) {
   Handle<CompilationCacheTable> table = GetFirstTable();
-  return table->PutEval(*source, *context, *function_info);
+  return table->PutEval(*source, *context, *function_info, scope_position);
 }
 
 
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<SharedFunctionInfo> function_info) {
+    Handle<SharedFunctionInfo> function_info,
+    int scope_position) {
   CALL_HEAP_FUNCTION(isolate(),
-                     TryTablePut(source, context, function_info),
+                     TryTablePut(
+                         source, context, function_info, scope_position),
                      CompilationCacheTable);
 }
 
 
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
-                               Handle<SharedFunctionInfo> function_info) {
+                               Handle<SharedFunctionInfo> function_info,
+                               int scope_position) {
   HandleScope scope(isolate());
-  SetFirstTable(TablePut(source, context, function_info));
+  SetFirstTable(TablePut(source, context, function_info, scope_position));
 }
 
 
@@ -389,16 +396,20 @@
     Handle<String> source,
     Handle<Context> context,
     bool is_global,
-    StrictModeFlag strict_mode) {
+    LanguageMode language_mode,
+    int scope_position) {
   if (!IsEnabled()) {
     return Handle<SharedFunctionInfo>::null();
   }
 
   Handle<SharedFunctionInfo> result;
   if (is_global) {
-    result = eval_global_.Lookup(source, context, strict_mode);
+    result = eval_global_.Lookup(
+        source, context, language_mode, scope_position);
   } else {
-    result = eval_contextual_.Lookup(source, context, strict_mode);
+    ASSERT(scope_position != RelocInfo::kNoPosition);
+    result = eval_contextual_.Lookup(
+        source, context, language_mode, scope_position);
   }
   return result;
 }
@@ -427,16 +438,18 @@
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
                                bool is_global,
-                               Handle<SharedFunctionInfo> function_info) {
+                               Handle<SharedFunctionInfo> function_info,
+                               int scope_position) {
   if (!IsEnabled()) {
     return;
   }
 
   HandleScope scope(isolate());
   if (is_global) {
-    eval_global_.Put(source, context, function_info);
+    eval_global_.Put(source, context, function_info, scope_position);
   } else {
-    eval_contextual_.Put(source, context, function_info);
+    ASSERT(scope_position != RelocInfo::kNoPosition);
+    eval_contextual_.Put(source, context, function_info, scope_position);
   }
 }
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 4339d22..31f2909 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -123,7 +123,19 @@
 };
 
 
-// Sub-cache for eval scripts.
+// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
+// in global contexts and one for eval calls in other contexts. The cache
+// considers the following pieces of information when checking for matching
+// entries:
+// 1. The source string.
+// 2. The shared function info of the calling function.
+// 3. Whether the source should be compiled as strict code or as non-strict
+//    code.
+//    Note: Currently there are clients of CompileEval that always compile
+//    non-strict code even if the calling function is a strict mode function.
+//    More specifically these are the CompileString, DebugEvaluate and
+//    DebugEvaluateGlobal runtime functions.
+// 4. The start position of the calling scope.
 class CompilationCacheEval: public CompilationSubCache {
  public:
   CompilationCacheEval(Isolate* isolate, int generations)
@@ -131,23 +143,27 @@
 
   Handle<SharedFunctionInfo> Lookup(Handle<String> source,
                                     Handle<Context> context,
-                                    StrictModeFlag strict_mode);
+                                    LanguageMode language_mode,
+                                    int scope_position);
 
   void Put(Handle<String> source,
            Handle<Context> context,
-           Handle<SharedFunctionInfo> function_info);
+           Handle<SharedFunctionInfo> function_info,
+           int scope_position);
 
  private:
   MUST_USE_RESULT MaybeObject* TryTablePut(
       Handle<String> source,
       Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info);
+      Handle<SharedFunctionInfo> function_info,
+      int scope_position);
 
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source,
       Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info);
+      Handle<SharedFunctionInfo> function_info,
+      int scope_position);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
 };
@@ -198,7 +214,8 @@
   Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                         Handle<Context> context,
                                         bool is_global,
-                                        StrictModeFlag strict_mode);
+                                        LanguageMode language_mode,
+                                        int scope_position);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -215,7 +232,8 @@
   void PutEval(Handle<String> source,
                Handle<Context> context,
                bool is_global,
-               Handle<SharedFunctionInfo> function_info);
+               Handle<SharedFunctionInfo> function_info,
+               int scope_position);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
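
The CompilationCacheEval comment above lists the four inputs that distinguish
cached eval results: the source string, the calling function, the language
mode, and the start position of the calling scope. A minimal standalone sketch
of that keying, with hypothetical names rather than V8's actual hash-table
code:

    #include <map>
    #include <string>
    #include <tuple>

    // Illustrative stand-in for the information the eval sub-caches key on.
    struct EvalCacheKey {
      std::string source;   // 1. the source string
      const void* caller;   // 2. shared function info of the calling function
      int language_mode;    // 3. classic / strict / extended compilation
      int scope_position;   // 4. start position of the calling scope

      bool operator<(const EvalCacheKey& other) const {
        return std::tie(source, caller, language_mode, scope_position) <
               std::tie(other.source, other.caller, other.language_mode,
                        other.scope_position);
      }
    };

    // Identical sources compiled at different scope positions occupy
    // distinct entries, which is why scope_position is threaded through
    // Lookup/Put in this change.
    std::map<EvalCacheKey, int> eval_cache;
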
diff --git a/src/compiler-intrinsics.h b/src/compiler-intrinsics.h
new file mode 100644
index 0000000..3b9c59e
--- /dev/null
+++ b/src/compiler-intrinsics.h
@@ -0,0 +1,77 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILER_INTRINSICS_H_
+#define V8_COMPILER_INTRINSICS_H_
+
+namespace v8 {
+namespace internal {
+
+class CompilerIntrinsics {
+ public:
+  // Returns number of zero bits preceding least significant 1 bit.
+  // Undefined for zero value.
+  INLINE(static int CountTrailingZeros(uint32_t value));
+
+  // Returns number of zero bits following most significant 1 bit.
+  // Undefined for zero value.
+  INLINE(static int CountLeadingZeros(uint32_t value));
+};
+
+#ifdef __GNUC__
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+  return __builtin_ctz(value);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+  return __builtin_clz(value);
+}
+
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+  unsigned long result;  //NOLINT
+  _BitScanForward(&result, static_cast<long>(value));  //NOLINT
+  return static_cast<int>(result);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+  unsigned long result;  //NOLINT
+  _BitScanReverse(&result, static_cast<long>(value));  //NOLINT
+  return 31 - static_cast<int>(result);
+}
+
+#else
+#error Unsupported compiler
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_COMPILER_INTRINSICS_H_
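
For reference, the semantics documented above can be checked with a standalone
program using the same GCC builtins; both are undefined for a zero argument,
so callers must test for zero first:

    #include <cassert>
    #include <stdint.h>

    int main() {
      uint32_t value = 0x00000008;  // bit 3 is the only set bit
      // Three zero bits precede the least significant 1 bit.
      assert(__builtin_ctz(value) == 3);
      // 28 zero bits follow the most significant 1 bit (31 - 3).
      assert(__builtin_clz(value) == 28);
      return 0;
    }
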
diff --git a/src/compiler.cc b/src/compiler.cc
index 5e1c4a9..16ccfa0 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -36,6 +36,7 @@
 #include "full-codegen.h"
 #include "gdb-jit.h"
 #include "hydrogen.h"
+#include "isolate-inl.h"
 #include "lithium.h"
 #include "liveedit.h"
 #include "parser.h"
@@ -52,13 +53,13 @@
 
 CompilationInfo::CompilationInfo(Handle<Script> script)
     : isolate_(script->GetIsolate()),
-      flags_(0),
+      flags_(LanguageModeField::encode(CLASSIC_MODE)),
       function_(NULL),
       scope_(NULL),
+      global_scope_(NULL),
       script_(script),
       extension_(NULL),
       pre_parse_data_(NULL),
-      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(NONOPT);
 }
@@ -66,14 +67,15 @@
 
 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
     : isolate_(shared_info->GetIsolate()),
-      flags_(IsLazy::encode(true)),
+      flags_(LanguageModeField::encode(CLASSIC_MODE) |
+             IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
+      global_scope_(NULL),
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
-      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(BASE);
 }
@@ -81,15 +83,16 @@
 
 CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
     : isolate_(closure->GetIsolate()),
-      flags_(IsLazy::encode(true)),
+      flags_(LanguageModeField::encode(CLASSIC_MODE) |
+             IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
+      global_scope_(NULL),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
-      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(BASE);
 }
@@ -167,7 +170,11 @@
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
-  if (!info->AllowOptimize()) info->DisableOptimization();
+  if (!info->AllowOptimize()) {
+    info->DisableOptimization();
+  } else if (info->IsOptimizable()) {
+    info->EnableDeoptimizationSupport();
+  }
 
   // In case we are not optimizing simply return the code from
   // the full code generator.
@@ -275,7 +282,7 @@
   }
 
   Handle<Context> global_context(info->closure()->context()->global_context());
-  TypeFeedbackOracle oracle(code, global_context);
+  TypeFeedbackOracle oracle(code, global_context, info->isolate());
   HGraphBuilder builder(info, &oracle);
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph();
@@ -308,9 +315,9 @@
 
 
 static bool GenerateCode(CompilationInfo* info) {
-  return V8::UseCrankshaft() ?
-    MakeCrankshaftCode(info) :
-    FullCodeGenerator::MakeCode(info);
+  return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
+      FullCodeGenerator::MakeCode(info) :
+      MakeCrankshaftCode(info);
 }
 
 
@@ -328,8 +335,7 @@
   // the compilation info is set if compilation succeeded.
   bool succeeded = MakeCode(info);
   if (!info->shared_info().is_null()) {
-    Handle<SerializedScopeInfo> scope_info =
-        SerializedScopeInfo::Create(info->scope());
+    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
     info->shared_info()->set_scope_info(*scope_info);
   }
   return succeeded;
@@ -371,8 +377,14 @@
 
   // Only allow non-global compiles for eval.
   ASSERT(info->is_eval() || info->is_global());
-
-  if (!ParserApi::Parse(info)) return Handle<SharedFunctionInfo>::null();
+  ParsingFlags flags = kNoParsingFlags;
+  if (info->pre_parse_data() != NULL ||
+      String::cast(script->source())->length() > FLAG_min_preparse_length) {
+    flags = kAllowLazy;
+  }
+  if (!ParserApi::Parse(info, flags)) {
+    return Handle<SharedFunctionInfo>::null();
+  }
 
   // Measure how long it takes to do the compilation; only take the
   // rest of the function into account to avoid overlap with the
@@ -397,7 +409,7 @@
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
-          SerializedScopeInfo::Create(info->scope()));
+          ScopeInfo::Create(info->scope()));
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
   Compiler::SetFunctionInfo(result, lit, true, script);
@@ -447,7 +459,7 @@
                                              int line_offset,
                                              int column_offset,
                                              v8::Extension* extension,
-                                             ScriptDataImpl* input_pre_data,
+                                             ScriptDataImpl* pre_data,
                                              Handle<Object> script_data,
                                              NativesFlag natives) {
   Isolate* isolate = source->GetIsolate();
@@ -478,23 +490,12 @@
     // for small sources, odds are that there aren't many functions
     // that would be compiled lazily anyway, so we skip the preparse step
     // in that case too.
-    ScriptDataImpl* pre_data = input_pre_data;
-    bool harmony_block_scoping = natives != NATIVES_CODE &&
-                                 FLAG_harmony_block_scoping;
-    if (pre_data == NULL
-        && source_length >= FLAG_min_preparse_length) {
-      if (source->IsExternalTwoByteString()) {
-        ExternalTwoByteStringUC16CharacterStream stream(
-            Handle<ExternalTwoByteString>::cast(source), 0, source->length());
-        pre_data = ParserApi::PartialPreParse(&stream,
-                                              extension,
-                                              harmony_block_scoping);
-      } else {
-        GenericStringUC16CharacterStream stream(source, 0, source->length());
-        pre_data = ParserApi::PartialPreParse(&stream,
-                                              extension,
-                                              harmony_block_scoping);
-      }
+    int flags = kNoParsingFlags;
+    if ((natives == NATIVES_CODE) || FLAG_allow_natives_syntax) {
+      flags |= kAllowNativesSyntax;
+    }
+    if (natives != NATIVES_CODE && FLAG_harmony_scoping) {
+      flags |= EXTENDED_MODE;
     }
 
     // Create a script object describing the script to be compiled.
@@ -520,11 +521,6 @@
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, result);
     }
-
-    // Get rid of the pre-parsing data (if necessary).
-    if (input_pre_data == NULL && pre_data != NULL) {
-      delete pre_data;
-    }
   }
 
   if (result.is_null()) isolate->ReportPendingMessages();
@@ -535,7 +531,8 @@
 Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                  Handle<Context> context,
                                                  bool is_global,
-                                                 StrictModeFlag strict_mode) {
+                                                 LanguageMode language_mode,
+                                                 int scope_position) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_eval_size()->Increment(source_length);
@@ -551,7 +548,8 @@
   result = compilation_cache->LookupEval(source,
                                          context,
                                          is_global,
-                                         strict_mode);
+                                         language_mode,
+                                         scope_position);
 
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
@@ -559,16 +557,20 @@
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
-    if (strict_mode == kStrictMode) info.MarkAsStrictMode();
+    info.SetLanguageMode(language_mode);
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
-      CompilationCache* compilation_cache = isolate->compilation_cache();
-      // If caller is strict mode, the result must be strict as well,
-      // but not the other way around. Consider:
+      // If caller is strict mode, the result must be in strict mode or
+      // extended mode as well, but not the other way around. Consider:
       // eval("'use strict'; ...");
-      ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
-      compilation_cache->PutEval(source, context, is_global, result);
+      ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
+      // If caller is in extended mode, the result must also be in
+      // extended mode.
+      ASSERT(language_mode != EXTENDED_MODE ||
+             result->is_extended_mode());
+      compilation_cache->PutEval(
+          source, context, is_global, result, scope_position);
     }
   }
 
@@ -591,17 +593,16 @@
   isolate->counters()->total_compile_size()->Increment(compiled_size);
 
   // Generate the AST for the lazily compiled function.
-  if (ParserApi::Parse(info)) {
+  if (ParserApi::Parse(info, kNoParsingFlags)) {
     // Measure how long it takes to do the lazy compilation; only take the
     // rest of the function into account to avoid overlap with the lazy
     // parsing statistics.
     HistogramTimerScope timer(isolate->counters()->compile_lazy());
 
-    // After parsing we know function's strict mode. Remember it.
-    if (info->function()->strict_mode()) {
-      shared->set_strict_mode(true);
-      info->MarkAsStrictMode();
-    }
+    // After parsing we know the function's language mode. Remember it.
+    LanguageMode language_mode = info->function()->language_mode();
+    info->SetLanguageMode(language_mode);
+    shared->set_language_mode(language_mode);
 
     // Compile the code.
     if (!MakeCode(info)) {
@@ -620,7 +621,7 @@
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
       if (info->IsOptimizing()) {
-        ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
+        ASSERT(shared->scope_info() != ScopeInfo::Empty());
         function->ReplaceCode(*code);
       } else {
         // Update the shared function info with the compiled code and the
@@ -628,8 +629,7 @@
         // info initialization is important since set_scope_info might
         // trigger a GC, causing the ASSERT below to be invalid if the code
         // was flushed. By setting the code object last we avoid this.
-        Handle<SerializedScopeInfo> scope_info =
-            SerializedScopeInfo::Create(info->scope());
+        Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
         shared->set_scope_info(*scope_info);
         shared->set_code(*code);
         if (!function.is_null()) {
@@ -681,7 +681,7 @@
   CompilationInfo info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
-  if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
+  info.SetLanguageMode(literal->scope()->language_mode());
 
   LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
   // Determine if the function can be lazily compiled. This is necessary to
@@ -692,7 +692,7 @@
   bool allow_lazy = literal->AllowsLazyCompilation() &&
       !LiveEditFunctionTracker::IsActive(info.isolate());
 
-  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
   // Generate code
   if (FLAG_lazy && allow_lazy) {
@@ -701,7 +701,7 @@
   } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
              (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-    scope_info = SerializedScopeInfo::Create(info.scope());
+    scope_info = ScopeInfo::Create(info.scope());
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
@@ -733,8 +733,8 @@
                                FunctionLiteral* lit,
                                bool is_toplevel,
                                Handle<Script> script) {
-  function_info->set_length(lit->num_parameters());
-  function_info->set_formal_parameter_count(lit->num_parameters());
+  function_info->set_length(lit->parameter_count());
+  function_info->set_formal_parameter_count(lit->parameter_count());
   function_info->set_script(*script);
   function_info->set_function_token_position(lit->function_token_position());
   function_info->set_start_position(lit->start_position());
@@ -747,7 +747,7 @@
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
-  function_info->set_strict_mode(lit->strict_mode());
+  function_info->set_language_mode(lit->language_mode());
   function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
 }
diff --git a/src/compiler.h b/src/compiler.h
index 69ab27d..47eaeea 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -52,10 +52,15 @@
   bool is_lazy() const { return IsLazy::decode(flags_); }
   bool is_eval() const { return IsEval::decode(flags_); }
   bool is_global() const { return IsGlobal::decode(flags_); }
-  bool is_strict_mode() const { return IsStrictMode::decode(flags_); }
+  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
+  bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
+  LanguageMode language_mode() const {
+    return LanguageModeField::decode(flags_);
+  }
   bool is_in_loop() const { return IsInLoop::decode(flags_); }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
+  Scope* global_scope() const { return global_scope_; }
   Handle<Code> code() const { return code_; }
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -73,11 +78,11 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
-  void MarkAsStrictMode() {
-    flags_ |= IsStrictMode::encode(true);
-  }
-  StrictModeFlag StrictMode() {
-    return is_strict_mode() ? kStrictMode : kNonStrictMode;
+  void SetLanguageMode(LanguageMode language_mode) {
+    ASSERT(this->language_mode() == CLASSIC_MODE ||
+           this->language_mode() == language_mode ||
+           language_mode == EXTENDED_MODE);
+    flags_ = LanguageModeField::update(flags_, language_mode);
   }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
@@ -97,6 +102,10 @@
     ASSERT(scope_ == NULL);
     scope_ = scope;
   }
+  void SetGlobalScope(Scope* global_scope) {
+    ASSERT(global_scope_ == NULL);
+    global_scope_ = global_scope;
+  }
   void SetCode(Handle<Code> code) { code_ = code; }
   void SetExtension(v8::Extension* extension) {
     ASSERT(!is_lazy());
@@ -114,6 +123,19 @@
     ASSERT(IsOptimizing());
     osr_ast_id_ = osr_ast_id;
   }
+  void MarkCompilingForDebugging(Handle<Code> current_code) {
+    ASSERT(mode_ != OPTIMIZE);
+    ASSERT(current_code->kind() == Code::FUNCTION);
+    flags_ |= IsCompilingForDebugging::encode(true);
+    if (current_code->is_compiled_optimizable()) {
+      EnableDeoptimizationSupport();
+    } else {
+      mode_ = CompilationInfo::NONOPT;
+    }
+  }
+  bool IsCompilingForDebugging() {
+    return IsCompilingForDebugging::decode(flags_);
+  }
 
   bool has_global_object() const {
     return !closure().is_null() && (closure()->context()->global() != NULL);
@@ -133,10 +155,12 @@
   void DisableOptimization();
 
   // Deoptimization support.
-  bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
+  bool HasDeoptimizationSupport() const {
+    return SupportsDeoptimization::decode(flags_);
+  }
   void EnableDeoptimizationSupport() {
     ASSERT(IsOptimizable());
-    supports_deoptimization_ = true;
+    flags_ |= SupportsDeoptimization::encode(true);
   }
 
   // Determine whether or not we can adaptively optimize.
@@ -171,8 +195,9 @@
     if (script_->type()->value() == Script::TYPE_NATIVE) {
       MarkAsNative();
     }
-    if (!shared_info_.is_null() && shared_info_->strict_mode()) {
-      MarkAsStrictMode();
+    if (!shared_info_.is_null()) {
+      ASSERT(language_mode() == CLASSIC_MODE);
+      SetLanguageMode(shared_info_->language_mode());
     }
   }
 
@@ -192,9 +217,14 @@
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Strict mode - used in eager compilation.
-  class IsStrictMode: public BitField<bool, 4, 1> {};
+  class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
   // Is this a function from our natives.
   class IsNative: public BitField<bool, 6, 1> {};
+  // Is this code being compiled with support for deoptimization.
+  class SupportsDeoptimization: public BitField<bool, 7, 1> {};
+  // If compiling for debugging, produce just full code matching the
+  // initial mode setting.
+  class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
 
 
   unsigned flags_;
@@ -205,6 +235,8 @@
   // The scope of the function literal as a convenience.  Set to indicate
   // that scopes have been analyzed.
   Scope* scope_;
+  // The global scope provided as a convenience.
+  Scope* global_scope_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -223,7 +255,6 @@
 
   // Compilation mode flag and whether deoptimization is allowed.
   Mode mode_;
-  bool supports_deoptimization_;
   int osr_ast_id_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -267,7 +298,8 @@
   static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
                                                 Handle<Context> context,
                                                 bool is_global,
-                                                StrictModeFlag strict_mode);
+                                                LanguageMode language_mode,
+                                                int scope_position);
 
   // Compile from function info (used for lazy compilation). Returns true on
   // success and false if the compilation resulted in a stack overflow.
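
The ASSERT in CompilationInfo::SetLanguageMode above permits exactly three
transitions: keeping the current mode, upgrading from classic mode, and
upgrading to extended mode; a mode is never lowered. A standalone restatement
of that rule, with a local stand-in enum rather than V8's LanguageMode:

    #include <cassert>

    enum Mode { CLASSIC, STRICT, EXTENDED };

    // Mirrors the ASSERT: from == CLASSIC || from == to || to == EXTENDED.
    bool TransitionAllowed(Mode from, Mode to) {
      return from == CLASSIC || from == to || to == EXTENDED;
    }

    int main() {
      assert(TransitionAllowed(CLASSIC, STRICT));    // upgrades are fine
      assert(TransitionAllowed(STRICT, EXTENDED));   // upgrades are fine
      assert(!TransitionAllowed(STRICT, CLASSIC));   // never downgrade
      assert(!TransitionAllowed(EXTENDED, STRICT));  // never downgrade
      return 0;
    }
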
diff --git a/src/contexts.cc b/src/contexts.cc
index 4f93abd..76784bd 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -86,14 +86,14 @@
 
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
-                               int* index_,
+                               int* index,
                                PropertyAttributes* attributes,
                                BindingFlags* binding_flags) {
   Isolate* isolate = GetIsolate();
   Handle<Context> context(this, isolate);
 
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
-  *index_ = -1;
+  *index = -1;
   *attributes = ABSENT;
   *binding_flags = MISSING_BINDING;
 
@@ -110,70 +110,51 @@
       PrintF("\n");
     }
 
-    // Check extension/with/global object.
-    if (!context->IsBlockContext() && context->has_extension()) {
-      if (context->IsCatchContext()) {
-        // Catch contexts have the variable name in the extension slot.
-        if (name->Equals(String::cast(context->extension()))) {
-          if (FLAG_trace_contexts) {
-            PrintF("=> found in catch context\n");
-          }
-          *index_ = Context::THROWN_OBJECT_INDEX;
-          *attributes = NONE;
-          *binding_flags = MUTABLE_IS_INITIALIZED;
-          return context;
-        }
+    // 1. Check global objects, subjects of with, and extension objects.
+    if (context->IsGlobalContext() ||
+        context->IsWithContext() ||
+        (context->IsFunctionContext() && context->has_extension())) {
+      Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
+      // Context extension objects needs to behave as if they have no
+      // prototype.  So even if we want to follow prototype chains, we need
+      // to only do a local lookup for context extension objects.
+      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+          object->IsJSContextExtensionObject()) {
+        *attributes = object->GetLocalPropertyAttribute(*name);
       } else {
-        ASSERT(context->IsGlobalContext() ||
-               context->IsFunctionContext() ||
-               context->IsWithContext());
-        // Global, function, and with contexts may have an object in the
-        // extension slot.
-        Handle<JSObject> extension(JSObject::cast(context->extension()),
-                                   isolate);
-        // Context extension objects needs to behave as if they have no
-        // prototype.  So even if we want to follow prototype chains, we
-        // need to only do a local lookup for context extension objects.
-        if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
-            extension->IsJSContextExtensionObject()) {
-          *attributes = extension->GetLocalPropertyAttribute(*name);
-        } else {
-          *attributes = extension->GetPropertyAttribute(*name);
+        *attributes = object->GetPropertyAttribute(*name);
+      }
+      if (*attributes != ABSENT) {
+        if (FLAG_trace_contexts) {
+          PrintF("=> found property in context object %p\n",
+                 reinterpret_cast<void*>(*object));
         }
-        if (*attributes != ABSENT) {
-          // property found
-          if (FLAG_trace_contexts) {
-            PrintF("=> found property in context object %p\n",
-                   reinterpret_cast<void*>(*extension));
-          }
-          return extension;
-        }
+        return object;
       }
     }
 
-    // Check serialized scope information of functions and blocks. Only
-    // functions can have parameters, and a function name.
+    // 2. Check the context proper if it has slots.
     if (context->IsFunctionContext() || context->IsBlockContext()) {
-      // We may have context-local slots.  Check locals in the context.
-      Handle<SerializedScopeInfo> scope_info;
+      // Use serialized scope information of functions and blocks to search
+      // for the context index.
+      Handle<ScopeInfo> scope_info;
       if (context->IsFunctionContext()) {
-        scope_info = Handle<SerializedScopeInfo>(
+        scope_info = Handle<ScopeInfo>(
             context->closure()->shared()->scope_info(), isolate);
       } else {
-        ASSERT(context->IsBlockContext());
-        scope_info = Handle<SerializedScopeInfo>(
-            SerializedScopeInfo::cast(context->extension()), isolate);
+        scope_info = Handle<ScopeInfo>(
+            ScopeInfo::cast(context->extension()), isolate);
       }
-
-      Variable::Mode mode;
-      int index = scope_info->ContextSlotIndex(*name, &mode);
-      ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
-      if (index >= 0) {
+      VariableMode mode;
+      InitializationFlag init_flag;
+      int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
+      ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
+      if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
-                 index, mode);
+                 slot_index, mode);
         }
-        *index_ = index;
+        *index = slot_index;
         // Note: Fixed context slots are statically allocated by the compiler.
         // Statically allocated variables always have a statically known mode,
         // which is the mode with which they were declared when added to the
@@ -181,23 +162,31 @@
         // declared variables that were introduced through declaration nodes)
         // must not appear here.
         switch (mode) {
-          case Variable::INTERNAL:  // Fall through.
-          case Variable::VAR:
+          case INTERNAL:  // Fall through.
+          case VAR:
             *attributes = NONE;
             *binding_flags = MUTABLE_IS_INITIALIZED;
             break;
-          case Variable::LET:
+          case LET:
             *attributes = NONE;
-            *binding_flags = MUTABLE_CHECK_INITIALIZED;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
             break;
-          case Variable::CONST:
+          case CONST:
             *attributes = READ_ONLY;
-            *binding_flags = IMMUTABLE_CHECK_INITIALIZED;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
             break;
-          case Variable::DYNAMIC:
-          case Variable::DYNAMIC_GLOBAL:
-          case Variable::DYNAMIC_LOCAL:
-          case Variable::TEMPORARY:
+          case CONST_HARMONY:
+            *attributes = READ_ONLY;
+            *binding_flags = (init_flag == kNeedsInitialization)
+                ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
+                IMMUTABLE_IS_INITIALIZED_HARMONY;
+            break;
+          case DYNAMIC:
+          case DYNAMIC_GLOBAL:
+          case DYNAMIC_LOCAL:
+          case TEMPORARY:
             UNREACHABLE();
             break;
         }
@@ -206,22 +195,37 @@
 
       // Check the slot corresponding to the intermediate context holding
       // only the function name variable.
-      if (follow_context_chain) {
-        int index = scope_info->FunctionContextSlotIndex(*name);
-        if (index >= 0) {
+      if (follow_context_chain && context->IsFunctionContext()) {
+        VariableMode mode;
+        int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
+        if (function_index >= 0) {
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
-                   index);
+                   function_index);
           }
-          *index_ = index;
+          *index = function_index;
           *attributes = READ_ONLY;
-          *binding_flags = IMMUTABLE_IS_INITIALIZED;
+          ASSERT(mode == CONST || mode == CONST_HARMONY);
+          *binding_flags = (mode == CONST)
+              ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
           return context;
         }
       }
+
+    } else if (context->IsCatchContext()) {
+      // Catch contexts have the variable name in the extension slot.
+      if (name->Equals(String::cast(context->extension()))) {
+        if (FLAG_trace_contexts) {
+          PrintF("=> found in catch context\n");
+        }
+        *index = Context::THROWN_OBJECT_INDEX;
+        *attributes = NONE;
+        *binding_flags = MUTABLE_IS_INITIALIZED;
+        return context;
+      }
     }
 
-    // Proceed with the previous context.
+    // 3. Prepare to continue with the previous (next outermost) context.
     if (context->IsGlobalContext()) {
       follow_context_chain = false;
     } else {
@@ -236,68 +240,6 @@
 }
 
 
-bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
-  Context* context = this;
-
-  // Check that there is no local with the given name in contexts
-  // before the global context and check that there are no context
-  // extension objects (conservative check for with statements).
-  while (!context->IsGlobalContext()) {
-    // Check if the context is a catch or with context, or has introduced
-    // bindings by calling non-strict eval.
-    if (context->has_extension()) return false;
-
-    // Not a with context so it must be a function context.
-    ASSERT(context->IsFunctionContext());
-
-    // Check non-parameter locals.
-    Handle<SerializedScopeInfo> scope_info(
-        context->closure()->shared()->scope_info());
-    Variable::Mode mode;
-    int index = scope_info->ContextSlotIndex(*name, &mode);
-    ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
-    if (index >= 0) return false;
-
-    // Check parameter locals.
-    int param_index = scope_info->ParameterIndex(*name);
-    if (param_index >= 0) return false;
-
-    // Check context only holding the function name variable.
-    index = scope_info->FunctionContextSlotIndex(*name);
-    if (index >= 0) return false;
-    context = context->previous();
-  }
-
-  // No local or potential with statement found so the variable is
-  // global unless it is shadowed by an eval-introduced variable.
-  return true;
-}
-
-
-void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
-                                   bool* outer_scope_calls_non_strict_eval) {
-  // Skip up the context chain checking all the function contexts to see
-  // whether they call eval.
-  Context* context = this;
-  while (!context->IsGlobalContext()) {
-    if (context->IsFunctionContext()) {
-      Handle<SerializedScopeInfo> scope_info(
-          context->closure()->shared()->scope_info());
-      if (scope_info->CallsEval()) {
-        *outer_scope_calls_eval = true;
-        if (!scope_info->IsStrictMode()) {
-          // No need to go further since the answers will not change from
-          // here.
-          *outer_scope_calls_non_strict_eval = true;
-          return;
-        }
-      }
-    }
-    context = context->previous();
-  }
-}
-
-
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsGlobalContext());
 #ifdef DEBUG
diff --git a/src/contexts.h b/src/contexts.h
index 505f86c..10ef33d 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -46,24 +46,43 @@
 
 // ES5 10.2 defines lexical environments with mutable and immutable bindings.
 // Immutable bindings have two states, initialized and uninitialized, and
-// their state is changed by the InitializeImmutableBinding method.
+// their state is changed by the InitializeImmutableBinding method. The
+// BindingFlags enum indicates whether a binding has definitely been
+// initialized. A mutable binding does not need to be checked and thus has
+// the BindingFlag MUTABLE_IS_INITIALIZED.
+//
+// There are two possibilities for immutable bindings:
+//  * 'const' declared variables. They are initialized when evaluating the
+//    corresponding declaration statement. They need to be checked for being
+//    initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
+//  * The function name of a named function literal. The binding is immediately
+//    initialized when entering the function and thus does not need to be
+//    checked. It gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
+// Accessing an uninitialized binding produces the undefined value.
 //
 // The harmony proposal for block scoped bindings also introduces the
-// uninitialized state for mutable bindings. A 'let' declared variable
-// is a mutable binding that is created uninitalized upon activation of its
-// lexical environment and it is initialized when evaluating its declaration
-// statement. Var declared variables are mutable bindings that are
-// immediately initialized upon creation. The BindingFlags enum represents
-// information if a binding has definitely been initialized. 'const' declared
-// variables are created as uninitialized immutable bindings.
-
-// In harmony mode accessing an uninitialized binding produces a reference
-// error.
+// uninitialized state for mutable bindings.
+//  * 'let' declared variables. They are initialized when evaluating the
+//    corresponding declaration statement. They need to be checked for being
+//    initialized and thus get the flag MUTABLE_CHECK_INITIALIZED.
+//  * A 'var' declared variable. It is initialized immediately upon creation
+//    and thus doesn't need to be checked. It gets the flag
+//    MUTABLE_IS_INITIALIZED.
+//  * Catch bound variables, function parameters and variables introduced by
+//    function declarations are initialized immediately and do not need to be
+//    checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
+// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
+// an uninitialized binding produces a reference error.
+//
+// In V8 uninitialized bindings are set to the hole value upon creation and set
+// to a different value upon initialization.
 enum BindingFlags {
   MUTABLE_IS_INITIALIZED,
   MUTABLE_CHECK_INITIALIZED,
   IMMUTABLE_IS_INITIALIZED,
   IMMUTABLE_CHECK_INITIALIZED,
+  IMMUTABLE_IS_INITIALIZED_HARMONY,
+  IMMUTABLE_CHECK_INITIALIZED_HARMONY,
   MISSING_BINDING
 };
 
@@ -134,9 +153,13 @@
   V(MAP_CACHE_INDEX, Object, map_cache) \
   V(CONTEXT_DATA_INDEX, Object, data) \
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+  V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
+    to_complete_property_descriptor) \
   V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
   V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
-  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
+  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
+  V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
+  V(RANDOM_SEED_INDEX, ByteArray, random_seed)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -192,7 +215,8 @@
     PREVIOUS_INDEX,
     // The extension slot is used for either the global object (in global
     // contexts), eval extension object (function contexts), subject of with
-    // (with contexts), or the variable name (catch contexts).
+    // (with contexts), the variable name (catch contexts), or the serialized
+    // scope info (block contexts).
     EXTENSION_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,
@@ -252,9 +276,12 @@
     OUT_OF_MEMORY_INDEX,
     CONTEXT_DATA_INDEX,
     ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+    TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
     DERIVED_HAS_TRAP_INDEX,
     DERIVED_GET_TRAP_INDEX,
     DERIVED_SET_TRAP_INDEX,
+    PROXY_ENUMERATE,
+    RANDOM_SEED_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
@@ -330,12 +357,6 @@
   // Mark the global context with out of memory.
   inline void mark_out_of_memory();
 
-  // The exception holder is the object used as a with object in
-  // the implementation of a catch block.
-  bool is_exception_holder(Object* object) {
-    return IsCatchContext() && extension() == object;
-  }
-
   // A global context holds a list of all functions which have been optimized.
   void AddOptimizedFunction(JSFunction* function);
   void RemoveOptimizedFunction(JSFunction* function);
@@ -355,46 +376,28 @@
 #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
 
   // Look up the slot called name, starting with the current context.
-  // There are 4 possible outcomes:
+  // There are three possibilities:
   //
-  // 1) index_ >= 0 && result->IsContext():
-  //    most common case, the result is a Context, and index is the
-  //    context slot index, and the slot exists.
-  //    attributes == READ_ONLY for the function name variable, NONE otherwise.
+  // 1) result->IsContext():
+  //    The binding was found in a context.  *index is always the
+  //    non-negative slot index.  *attributes is NONE for var and let
+  //    declarations, READ_ONLY for const declarations (never ABSENT).
   //
-  // 2) index_ >= 0 && result->IsJSObject():
-  //    the result is the JSObject arguments object, the index is the parameter
-  //    index, i.e., key into the arguments object, and the property exists.
-  //    attributes != ABSENT.
+  // 2) result->IsJSObject():
+  //    The binding was found as a named property in a context extension
+  //    object (i.e., was introduced via eval), as a property on the subject
+  //    of with, or as a property of the global object.  *index is -1 and
+  //    *attributes is not ABSENT.
   //
-  // 3) index_ < 0 && result->IsJSObject():
-  //    the result is the JSObject extension context or the global object,
-  //    and the name is the property name, and the property exists.
-  //    attributes != ABSENT.
-  //
-  // 4) index_ < 0 && result.is_null():
-  //    there was no context found with the corresponding property.
-  //    attributes == ABSENT.
+  // 3) result.is_null():
+  //    There was no binding found, *index is always -1 and *attributes is
+  //    always ABSENT.
   Handle<Object> Lookup(Handle<String> name,
                         ContextLookupFlags flags,
-                        int* index_,
+                        int* index,
                         PropertyAttributes* attributes,
                         BindingFlags* binding_flags);
 
-  // Determine if a local variable with the given name exists in a
-  // context.  Do not consider context extension objects.  This is
-  // used for compiling code using eval.  If the context surrounding
-  // the eval call does not have a local variable with this name and
-  // does not contain a with statement the property is global unless
-  // it is shadowed by a property in an extension object introduced by
-  // eval.
-  bool GlobalIfNotShadowedByEval(Handle<String> name);
-
-  // Determine if any function scope in the context call eval and if
-  // any of those calls are in non-strict mode.
-  void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
-                            bool* outer_scope_calls_non_strict_eval);
-
   // Code generation support.
   static int SlotOffset(int index) {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
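
The three Lookup outcomes documented above map onto three caller-side cases.
A schematic sketch with local stand-in types, since V8's Handle and attribute
types are abbreviated here to keep it self-contained:

    #include <cassert>
    #include <cstdio>

    enum Outcome { SLOT_IN_CONTEXT, PROPERTY_ON_OBJECT, NOT_FOUND };

    struct LookupResult {
      Outcome outcome;
      int index;       // slot index for case 1, otherwise -1
      bool read_only;  // READ_ONLY => true, NONE => false
    };

    void HandleLookup(const LookupResult& r) {
      switch (r.outcome) {
        case SLOT_IN_CONTEXT:     // 1) binding found in a context's slots
          assert(r.index >= 0);
          std::printf("context slot %d%s\n", r.index,
                      r.read_only ? " (const)" : "");
          break;
        case PROPERTY_ON_OBJECT:  // 2) named property on an object
          assert(r.index == -1);
          std::printf("property on extension/with/global object\n");
          break;
        case NOT_FOUND:           // 3) null result, attributes ABSENT
          std::printf("no binding found\n");
          break;
      }
    }

    int main() {
      LookupResult slot = { SLOT_IN_CONTEXT, 2, false };
      LookupResult missing = { NOT_FOUND, -1, false };
      HandleLookup(slot);
      HandleLookup(missing);
      return 0;
    }
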
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 41cf0d5..b098a1c 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -46,15 +46,15 @@
 namespace v8 {
 namespace internal {
 
-static inline double JunkStringValue() {
-  return std::numeric_limits<double>::quiet_NaN();
+inline double JunkStringValue() {
+  return BitCast<double, uint64_t>(kQuietNaNMask);
 }
 
 
 // The fast double-to-unsigned-int conversion routine does not guarantee
 // rounding towards zero, or any reasonable value if the argument is larger
 // than what fits in an unsigned 32-bit integer.
-static inline unsigned int FastD2UI(double x) {
+inline unsigned int FastD2UI(double x) {
   // There is no unsigned version of lrint, so there is no fast path
   // in this function as there is in FastD2I. Using lrint doesn't work
   // for values of 2^31 and above.
@@ -80,7 +80,7 @@
 }
 
 
-static inline double DoubleToInteger(double x) {
+inline double DoubleToInteger(double x) {
   if (isnan(x)) return 0;
   if (!isfinite(x) || x == 0) return x;
   return (x >= 0) ? floor(x) : ceil(x);
@@ -103,9 +103,9 @@
 
 
 template <class Iterator, class EndMark>
-static bool SubStringEquals(Iterator* current,
-                            EndMark end,
-                            const char* substring) {
+bool SubStringEquals(Iterator* current,
+                     EndMark end,
+                     const char* substring) {
   ASSERT(**current == *substring);
   for (substring++; *substring != '\0'; substring++) {
     ++*current;
@@ -119,9 +119,9 @@
 // Returns true if a nonspace character has been found and false if the
 // end has been reached before finding a nonspace character.
 template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
-                                     Iterator* current,
-                                     EndMark end) {
+inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+                              Iterator* current,
+                              EndMark end) {
   while (*current != end) {
     if (!unicode_cache->IsWhiteSpace(**current)) return true;
     ++*current;
@@ -132,11 +132,11 @@
 
 // Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
 template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
-                                        Iterator current,
-                                        EndMark end,
-                                        bool negative,
-                                        bool allow_trailing_junk) {
+double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+                                 Iterator current,
+                                 EndMark end,
+                                 bool negative,
+                                 bool allow_trailing_junk) {
   ASSERT(current != end);
 
   // Skip leading 0s.
@@ -235,10 +235,10 @@
 
 
 template <class Iterator, class EndMark>
-static double InternalStringToInt(UnicodeCache* unicode_cache,
-                                  Iterator current,
-                                  EndMark end,
-                                  int radix) {
+double InternalStringToInt(UnicodeCache* unicode_cache,
+                           Iterator current,
+                           EndMark end,
+                           int radix) {
   const bool allow_trailing_junk = true;
   const double empty_string_val = JunkStringValue();
 
@@ -430,11 +430,11 @@
 // 2. *current - gets the current character in the sequence.
 // 3. ++current (advances the position).
 template <class Iterator, class EndMark>
-static double InternalStringToDouble(UnicodeCache* unicode_cache,
-                                     Iterator current,
-                                     EndMark end,
-                                     int flags,
-                                     double empty_string_val) {
+double InternalStringToDouble(UnicodeCache* unicode_cache,
+                              Iterator current,
+                              EndMark end,
+                              int flags,
+                              double empty_string_val) {
   // To make sure that iterator dereferencing is valid the following
   // convention is used:
   // 1. Each '++current' statement is followed by check for equality to 'end'.
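
JunkStringValue above now materializes its NaN from an explicit bit pattern
rather than from std::numeric_limits. A standalone equivalent using a portable
bit-cast; the constant below is a representative IEEE-754 quiet-NaN pattern
assumed for illustration, not necessarily the exact value of kQuietNaNMask:

    #include <cassert>
    #include <cmath>
    #include <cstring>
    #include <stdint.h>

    // An all-ones exponent with the quiet bit set is a quiet NaN.
    double QuietNaNFromBits() {
      uint64_t bits = 0x7FF8000000000000ULL;
      double result;
      std::memcpy(&result, &bits, sizeof result);  // well-defined bit-cast
      return result;
    }

    int main() {
      assert(std::isnan(QuietNaNFromBits()));
      return 0;
    }
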
diff --git a/src/conversions.h b/src/conversions.h
index e51ad65..70559c9 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,8 +28,6 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
-#include <limits>
-
 #include "utils.h"
 
 namespace v8 {
@@ -47,14 +45,14 @@
 const int kMaxSignificantDigits = 772;
 
 
-static inline bool isDigit(int x, int radix) {
+inline bool isDigit(int x, int radix) {
   return (x >= '0' && x <= '9' && x < '0' + radix)
       || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
       || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
 }
 
 
-static inline double SignedZero(bool negative) {
+inline double SignedZero(bool negative) {
   return negative ? -0.0 : 0.0;
 }
 
@@ -63,16 +61,16 @@
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
 // integer value is outside the range of type int.
-static inline int FastD2I(double x) {
+inline int FastD2I(double x) {
   // The static_cast conversion from double to int used to be slow, but
   // as new benchmarks show, now it is much faster than lrint().
   return static_cast<int>(x);
 }
 
-static inline unsigned int FastD2UI(double x);
+inline unsigned int FastD2UI(double x);
 
 
-static inline double FastI2D(int x) {
+inline double FastI2D(int x) {
   // There is no rounding involved in converting an integer to a
   // double, so this code should compile to a few instructions without
   // any FPU pipeline stalls.
@@ -80,7 +78,7 @@
 }
 
 
-static inline double FastUI2D(unsigned x) {
+inline double FastUI2D(unsigned x) {
   // There is no rounding involved in converting an unsigned integer to a
   // double, so this code should compile to a few instructions without
   // any FPU pipeline stalls.
@@ -89,15 +87,15 @@
 
 
 // This function should match the exact semantics of ECMA-262 9.4.
-static inline double DoubleToInteger(double x);
+inline double DoubleToInteger(double x);
 
 
 // This function should match the exact semantics of ECMA-262 9.5.
-static inline int32_t DoubleToInt32(double x);
+inline int32_t DoubleToInt32(double x);
 
 
 // This function should match the exact semantics of ECMA-262 9.6.
-static inline uint32_t DoubleToUint32(double x) {
+inline uint32_t DoubleToUint32(double x) {
   return static_cast<uint32_t>(DoubleToInt32(x));
 }
 
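
DoubleToUint32 above is DoubleToInt32 reinterpreted as unsigned, matching the
modulo-2^32 arithmetic of ECMA-262 9.5 and 9.6. A standalone model of those
semantics for finite inputs, as an illustration rather than V8's optimized
implementation:

    #include <cassert>
    #include <cmath>
    #include <stdint.h>

    // Truncate toward zero, then wrap modulo 2^32 into [0, 2^32), as the
    // spec prescribes for finite values.
    int32_t ModelToInt32(double x) {
      if (std::isnan(x) || std::isinf(x)) return 0;
      double wrapped = std::fmod(std::trunc(x), 4294967296.0);
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }

    int main() {
      assert(ModelToInt32(-1.0) == -1);
      // The same bits read as unsigned give ToUint32's answer.
      assert(static_cast<uint32_t>(ModelToInt32(-1.0)) == 4294967295u);
      assert(ModelToInt32(4294967296.0 + 5.0) == 5);  // wraps modulo 2^32
      return 0;
    }
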
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index adefba7..8fbc876 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,6 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 
 #include "d8.h"
 #include "d8-debug.h"
@@ -367,3 +368,5 @@
 
 
 }  // namespace v8
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/d8.cc b/src/d8.cc
index 63a7d15..9eccc7e 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -146,11 +146,11 @@
                           Handle<Value> name,
                           bool print_result,
                           bool report_exceptions) {
-#ifndef V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
   bool FLAG_debugger = i::FLAG_debugger;
 #else
   bool FLAG_debugger = false;
-#endif  // V8_SHARED
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
   HandleScope handle_scope;
   TryCatch try_catch;
   options.script_executed = true;
@@ -178,7 +178,8 @@
         // If all went well and the result wasn't undefined then print
         // the returned value.
         v8::String::Utf8Value str(result);
-        fwrite(*str, sizeof(**str), str.length(), stdout);
+        size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
+        (void) count;  // Silence GCC-4.5.x "unused result" warning.
         printf("\n");
       }
       return true;
@@ -594,6 +595,7 @@
   Context::Scope utility_scope(utility_context_);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
+  if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
   // Install the debugger object in the utility scope
   i::Debug* debug = i::Isolate::Current()->debug();
   debug->Load();
@@ -760,13 +762,8 @@
 #endif  // V8_SHARED
   // Initialize the global objects
   Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
-
-  v8::TryCatch try_catch;
   Persistent<Context> context = Context::New(NULL, global_template);
-  if (context.IsEmpty()) {
-    v8::Local<v8::Value> st = try_catch.StackTrace();
-    ASSERT(!context.IsEmpty());
-  }
+  ASSERT(!context.IsEmpty());
   Context::Scope scope(context);
 
 #ifndef V8_SHARED
@@ -797,22 +794,47 @@
 
 
 #ifndef V8_SHARED
+struct CounterAndKey {
+  Counter* counter;
+  const char* key;
+};
+
+
+int CompareKeys(const void* a, const void* b) {
+  return strcmp(static_cast<const CounterAndKey*>(a)->key,
+                static_cast<const CounterAndKey*>(b)->key);
+}
+
+
 void Shell::OnExit() {
   if (console != NULL) console->Close();
   if (i::FLAG_dump_counters) {
-    printf("+----------------------------------------+-------------+\n");
-    printf("| Name                                   | Value       |\n");
-    printf("+----------------------------------------+-------------+\n");
+    int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
-      Counter* counter = i.CurrentValue();
+      number_of_counters++;
+    }
+    CounterAndKey* counters = new CounterAndKey[number_of_counters];
+    int j = 0;
+    for (CounterMap::Iterator i(counter_map_); i.More(); i.Next(), j++) {
+      counters[j].counter = i.CurrentValue();
+      counters[j].key = i.CurrentKey();
+    }
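+    // Sort the counters alphabetically by key so the dump is printed in a
+    // stable, deterministic order.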
+    qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
+    printf("+--------------------------------------------+-------------+\n");
+    printf("| Name                                       | Value       |\n");
+    printf("+--------------------------------------------+-------------+\n");
+    for (j = 0; j < number_of_counters; j++) {
+      Counter* counter = counters[j].counter;
+      const char* key = counters[j].key;
       if (counter->is_histogram()) {
-        printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
-        printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
+        printf("| c:%-40s | %11i |\n", key, counter->count());
+        printf("| t:%-40s | %11i |\n", key, counter->sample_total());
       } else {
-        printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
+        printf("| %-42s | %11i |\n", key, counter->count());
       }
     }
-    printf("+----------------------------------------+-------------+\n");
+    printf("+--------------------------------------------+-------------+\n");
+    delete [] counters;
   }
   if (counters_file_ != NULL)
     delete counters_file_;
@@ -821,7 +843,7 @@
 
 
 static FILE* FOpen(const char* path, const char* mode) {
-#if (defined(_WIN32) || defined(_WIN64))
+#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
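+  // fopen_s is a Microsoft CRT extension, so gate on _MSC_VER rather than
+  // on the Windows platform defines alone (MinGW-based builds may lack it).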
   FILE* result;
   if (fopen_s(&result, path, mode) == 0) {
     return result;
@@ -905,9 +927,6 @@
 #ifndef V8_SHARED
   console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
-  if (i::FLAG_debugger) {
-    printf("JavaScript debugger enabled\n");
-  }
   console->Open();
   while (true) {
     i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@@ -1258,14 +1277,22 @@
     Locker lock;
     HandleScope scope;
     Persistent<Context> context = CreateEvaluationContext();
+    if (options.last_run) {
+      // Keep using the same context in the interactive shell.
+      evaluation_context_ = context;
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+      // If the interactive debugger is enabled make sure to activate
+      // it before running the files passed on the command line.
+      if (i::FLAG_debugger) {
+        InstallUtilityScript();
+      }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+    }
     {
       Context::Scope cscope(context);
       options.isolate_sources[0].Execute();
     }
-    if (options.last_run) {
-      // Keep using the same context in the interactive shell
-      evaluation_context_ = context;
-    } else {
+    if (!options.last_run) {
       context.Dispose();
     }
 
@@ -1336,9 +1363,11 @@
   if (( options.interactive_shell
       || !options.script_executed )
       && !options.test_shell ) {
-#ifndef V8_SHARED
-    InstallUtilityScript();
-#endif  // V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
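+    // If the debugger flag is set the utility script was already installed
+    // before the files passed on the command line were executed.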
+    if (!i::FLAG_debugger) {
+      InstallUtilityScript();
+    }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
     RunShell();
   }
 
diff --git a/src/d8.gyp b/src/d8.gyp
index 70186cf..bdc23a2 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -65,7 +65,7 @@
               'sources': [ 'd8-readline.cc' ],
             }],
             [ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
-              or OS=="openbsd" or OS=="solaris")', {
+              or OS=="openbsd" or OS=="solaris" or OS=="android")', {
               'sources': [ 'd8-posix.cc', ]
             }],
             [ 'OS=="win"', {
diff --git a/src/d8.js b/src/d8.js
index 3009037..86b8c81 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -26,10 +26,11 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 String.prototype.startsWith = function (str) {
-  if (str.length > this.length)
+  if (str.length > this.length) {
     return false;
+  }
   return this.substr(0, str.length) == str;
-}
+};
 
 function log10(num) {
   return Math.log(num)/Math.log(10);
@@ -52,8 +53,9 @@
   for (var i = 0; i < parts.length; i++) {
     var part = parts[i];
     var next = current[part];
-    if (!next)
+    if (!next) {
       return [];
+    }
     current = next;
   }
   var result = [];
@@ -63,8 +65,9 @@
     var properties = mirror.properties();
     for (var i = 0; i < properties.length; i++) {
       var name = properties[i].name();
-      if (typeof name === 'string' && name.startsWith(last))
+      if (typeof name === 'string' && name.startsWith(last)) {
         result.push(name);
+      }
     }
     current = ToInspectableObject(current.__proto__);
   }
@@ -114,7 +117,7 @@
   displaySourceStartLine: -1,
   displaySourceEndLine: -1,
   currentSourceLine: -1
-}
+};
 var trace_compile = false;  // Tracing all compile events?
 var trace_debug_json = false; // Tracing all debug json packets?
 var last_cmd_line = '';
@@ -150,7 +153,7 @@
 }
 
 function DebugEventDetails(response) {
-  details = {text:'', running:false}
+  details = {text:'', running:false};
 
   // Get the running state.
   details.running = response.running();
@@ -217,7 +220,7 @@
 
     case 'afterCompile':
       if (trace_compile) {
-        result = 'Source ' + body.script.name + ' compiled:\n'
+        result = 'Source ' + body.script.name + ' compiled:\n';
         var source = body.script.source;
         if (!(source[source.length - 1] == '\n')) {
           result += source;
@@ -237,7 +240,7 @@
   }
 
   return details;
-};
+}
 
 
 function SourceInfo(body) {
@@ -279,7 +282,7 @@
 
   // Return the source line text with the underline beneath.
   return source_text + '\n' + underline;
-};
+}
 
 
 // Converts a text command to a JSON request.
@@ -289,7 +292,7 @@
     print("sending: '" + result + "'");
   }
   return result;
-};
+}
 
 
 function DebugRequest(cmd_line) {
@@ -514,7 +517,7 @@
 
 DebugRequest.prototype.JSONRequest = function() {
   return this.request_;
-}
+};
 
 
 function RequestPacket(command) {
@@ -536,14 +539,14 @@
     json += ',"arguments":';
     // Encode the arguments part.
     if (this.arguments.toJSONProtocol) {
-      json += this.arguments.toJSONProtocol()
+      json += this.arguments.toJSONProtocol();
     } else {
       json += SimpleObjectToJSON_(this.arguments);
     }
   }
   json += '}';
   return json;
-}
+};
 
 
 DebugRequest.prototype.createRequest = function(command) {
@@ -1310,7 +1313,7 @@
   }
 
   return request;
-}
+};
 
 
 function extractObjId(args) {
@@ -1499,7 +1502,7 @@
   } else {
     throw new Error('Invalid trace arguments.');
   }
-}
+};
 
 // Handle the help command.
 DebugRequest.prototype.helpCommand_ = function(args) {
@@ -1608,7 +1611,7 @@
   print('');
   print('disconnect|exit|quit       - disconnects and quits the debugger');
   print('help                       - prints this help information');
-}
+};
 
 
 function formatHandleReference_(value) {
@@ -1623,7 +1626,7 @@
 function formatObject_(value, include_properties) {
   var result = '';
   result += formatHandleReference_(value);
-  result += ', type: object'
+  result += ', type: object';
   result += ', constructor ';
   var ctor = value.constructorFunctionValue();
   result += formatHandleReference_(ctor);
@@ -1943,7 +1946,7 @@
 
 // Convert a JSON response to text for display in a text based debugger.
 function DebugResponseDetails(response) {
-  details = {text:'', running:false}
+  details = { text: '', running: false };
 
   try {
     if (!response.success()) {
@@ -2308,7 +2311,7 @@
   }
 
   return details;
-};
+}
 
 
 /**
@@ -2334,7 +2337,7 @@
  */
 ProtocolPackage.prototype.type = function() {
   return this.packet_.type;
-}
+};
 
 
 /**
@@ -2343,7 +2346,7 @@
  */
 ProtocolPackage.prototype.event = function() {
   return this.packet_.event;
-}
+};
 
 
 /**
@@ -2352,7 +2355,7 @@
  */
 ProtocolPackage.prototype.requestSeq = function() {
   return this.packet_.request_seq;
-}
+};
 
 
 /**
@@ -2361,27 +2364,27 @@
  */
 ProtocolPackage.prototype.running = function() {
   return this.packet_.running ? true : false;
-}
+};
 
 
 ProtocolPackage.prototype.success = function() {
   return this.packet_.success ? true : false;
-}
+};
 
 
 ProtocolPackage.prototype.message = function() {
   return this.packet_.message;
-}
+};
 
 
 ProtocolPackage.prototype.command = function() {
   return this.packet_.command;
-}
+};
 
 
 ProtocolPackage.prototype.body = function() {
   return this.packet_.body;
-}
+};
 
 
 ProtocolPackage.prototype.bodyValue = function(index) {
@@ -2390,12 +2393,12 @@
   } else {
     return new ProtocolValue(this.packet_.body, this);
   }
-}
+};
 
 
 ProtocolPackage.prototype.body = function() {
   return this.packet_.body;
-}
+};
 
 
 ProtocolPackage.prototype.lookup = function(handle) {
@@ -2405,12 +2408,12 @@
   } else {
     return new ProtocolReference(handle);
   }
-}
+};
 
 
 ProtocolPackage.prototype.raw_json = function() {
   return this.raw_json_;
-}
+};
 
 
 function ProtocolValue(value, packet) {
@@ -2425,7 +2428,7 @@
  */
 ProtocolValue.prototype.type = function() {
   return this.value_.type;
-}
+};
 
 
 /**
@@ -2434,7 +2437,7 @@
  */
 ProtocolValue.prototype.field = function(name) {
   return this.value_[name];
-}
+};
 
 
 /**
@@ -2444,7 +2447,7 @@
 ProtocolValue.prototype.isPrimitive = function() {
   return this.isUndefined() || this.isNull() || this.isBoolean() ||
          this.isNumber() || this.isString();
-}
+};
 
 
 /**
@@ -2453,7 +2456,7 @@
  */
 ProtocolValue.prototype.handle = function() {
   return this.value_.handle;
-}
+};
 
 
 /**
@@ -2462,7 +2465,7 @@
  */
 ProtocolValue.prototype.isUndefined = function() {
   return this.value_.type == 'undefined';
-}
+};
 
 
 /**
@@ -2471,7 +2474,7 @@
  */
 ProtocolValue.prototype.isNull = function() {
   return this.value_.type == 'null';
-}
+};
 
 
 /**
@@ -2480,7 +2483,7 @@
  */
 ProtocolValue.prototype.isBoolean = function() {
   return this.value_.type == 'boolean';
-}
+};
 
 
 /**
@@ -2489,7 +2492,7 @@
  */
 ProtocolValue.prototype.isNumber = function() {
   return this.value_.type == 'number';
-}
+};
 
 
 /**
@@ -2498,7 +2501,7 @@
  */
 ProtocolValue.prototype.isString = function() {
   return this.value_.type == 'string';
-}
+};
 
 
 /**
@@ -2508,7 +2511,7 @@
 ProtocolValue.prototype.isObject = function() {
   return this.value_.type == 'object' || this.value_.type == 'function' ||
          this.value_.type == 'error' || this.value_.type == 'regexp';
-}
+};
 
 
 /**
@@ -2518,7 +2521,7 @@
 ProtocolValue.prototype.constructorFunctionValue = function() {
   var ctor = this.value_.constructorFunction;
   return this.packet_.lookup(ctor.ref);
-}
+};
 
 
 /**
@@ -2528,7 +2531,7 @@
 ProtocolValue.prototype.protoObjectValue = function() {
   var proto = this.value_.protoObject;
   return this.packet_.lookup(proto.ref);
-}
+};
 
 
 /**
@@ -2537,7 +2540,7 @@
  */
 ProtocolValue.prototype.propertyCount = function() {
   return this.value_.properties ? this.value_.properties.length : 0;
-}
+};
 
 
 /**
@@ -2547,7 +2550,7 @@
 ProtocolValue.prototype.propertyName = function(index) {
   var property = this.value_.properties[index];
   return property.name;
-}
+};
 
 
 /**
@@ -2562,7 +2565,7 @@
     }
   }
   return null;
-}
+};
 
 
 /**
@@ -2572,7 +2575,7 @@
 ProtocolValue.prototype.propertyValue = function(index) {
   var property = this.value_.properties[index];
   return this.packet_.lookup(property.ref);
-}
+};
 
 
 /**
@@ -2581,12 +2584,12 @@
  */
 ProtocolValue.prototype.value = function() {
   return this.value_.value;
-}
+};
 
 
 ProtocolValue.prototype.valueString = function() {
   return this.value_.text;
-}
+};
 
 
 function ProtocolReference(handle) {
@@ -2596,7 +2599,7 @@
 
 ProtocolReference.prototype.handle = function() {
   return this.handle_;
-}
+};
 
 
 function MakeJSONPair_(name, value) {
@@ -2667,7 +2670,7 @@
         // Convert control character to unicode escape sequence.
         return '\\u00' +
           '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
-          '0' // TODO %NumberToRadixString(mapped % 16, 16);
+          '0'; // TODO %NumberToRadixString(mapped % 16, 16)
       })
     + '"';
   }
@@ -2738,7 +2741,7 @@
           if (property_value === null) {
             property_value_json = 'null';
           } else if (typeof property_value.toJSONProtocol == 'function') {
-            property_value_json = property_value.toJSONProtocol(true)
+            property_value_json = property_value.toJSONProtocol(true);
           } else if (property_value.constructor.name == 'Array'){
             property_value_json = SimpleArrayToJSON_(property_value);
           } else {
@@ -2789,7 +2792,7 @@
     }
     var elem = array[i];
     if (elem.toJSONProtocol) {
-      json += elem.toJSONProtocol(true)
+      json += elem.toJSONProtocol(true);
     } else if (typeof(elem) === 'object')  {
       json += SimpleObjectToJSON_(elem);
     } else if (typeof(elem) === 'boolean')  {
diff --git a/src/date.js b/src/date.js
index ccefce5..999009e 100644
--- a/src/date.js
+++ b/src/date.js
@@ -294,8 +294,8 @@
 }
 
 
-var ymd_from_time_cache = [$NaN, $NaN, $NaN];
-var ymd_from_time_cached_time = $NaN;
+var ymd_from_time_cache = [1970, 0, 1];
+var ymd_from_time_cached_time = 0;
 
 function YearFromTime(t) {
   if (t !== ymd_from_time_cached_time) {
@@ -304,7 +304,7 @@
     }
 
     %DateYMDFromTime(t, ymd_from_time_cache);
-    ymd_from_time_cached_time = t
+    ymd_from_time_cached_time = t;
   }
 
   return ymd_from_time_cache[0];
@@ -316,7 +316,7 @@
       return $NaN;
     }
     %DateYMDFromTime(t, ymd_from_time_cache);
-    ymd_from_time_cached_time = t
+    ymd_from_time_cached_time = t;
   }
 
   return ymd_from_time_cache[1];
@@ -329,7 +329,7 @@
     }
 
     %DateYMDFromTime(t, ymd_from_time_cache);
-    ymd_from_time_cached_time = t
+    ymd_from_time_cached_time = t;
   }
 
   return ymd_from_time_cache[2];
@@ -351,13 +351,12 @@
   date = TO_INTEGER_MAP_MINUS_ZERO(date);
 
   if (year < kMinYear || year > kMaxYear ||
-      month < kMinMonth || month > kMaxMonth ||
-      date < kMinDate || date > kMaxDate) {
+      month < kMinMonth || month > kMaxMonth) {
     return $NaN;
   }
 
-  // Now we rely on year, month and date being SMIs.
-  return %DateMakeDay(year, month, date);
+  // Now we rely on year and month being SMIs.
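+  // %DateMakeDay(year, month) returns the day number of the first day of
+  // that month; adding (date - 1) extends this to arbitrary, possibly
+  // out-of-range, date values.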
+  return %DateMakeDay(year, month) + date - 1;
 }
 
 
@@ -446,8 +445,9 @@
     minutes = argc > 4 ? ToNumber(minutes) : 0;
     seconds = argc > 5 ? ToNumber(seconds) : 0;
     ms = argc > 6 ? ToNumber(ms) : 0;
-    year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
-        ? 1900 + TO_INTEGER(year) : year;
+    year = (!NUMBER_IS_NAN(year) &&
+            0 <= TO_INTEGER(year) &&
+            TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
     var day = MakeDay(year, month, date);
     var time = MakeTime(hours, minutes, seconds, ms);
     value = TimeClip(UTC(MakeDate(day, time)));
@@ -460,7 +460,8 @@
 
 
 var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
 
 
 function TwoDigitString(value) {
@@ -476,8 +477,10 @@
 }
 
 
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
+var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
+    'Thursday', 'Friday', 'Saturday'];
+var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
+    'July', 'August', 'September', 'October', 'November', 'December'];
 
 
 function LongDateString(time) {
@@ -557,8 +560,9 @@
   minutes = argc > 4 ? ToNumber(minutes) : 0;
   seconds = argc > 5 ? ToNumber(seconds) : 0;
   ms = argc > 6 ? ToNumber(ms) : 0;
-  year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
-      ? 1900 + TO_INTEGER(year) : year;
+  year = (!NUMBER_IS_NAN(year) &&
+          0 <= TO_INTEGER(year) &&
+          TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
   var day = MakeDay(year, month, date);
   var time = MakeTime(hours, minutes, seconds, ms);
   return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
@@ -778,7 +782,10 @@
 function DateSetMilliseconds(ms) {
   var t = LocalTime(DATE_VALUE(this));
   ms = ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+  var time = MakeTime(HOUR_FROM_TIME(t),
+                      MIN_FROM_TIME(t),
+                      SEC_FROM_TIME(t),
+                      ms);
   return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
 }
 
@@ -787,7 +794,10 @@
 function DateSetUTCMilliseconds(ms) {
   var t = DATE_VALUE(this);
   ms = ToNumber(ms);
-  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+  var time = MakeTime(HOUR_FROM_TIME(t),
+                      MIN_FROM_TIME(t),
+                      SEC_FROM_TIME(t),
+                      ms);
   return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
 }
 
@@ -978,9 +988,10 @@
 }
 
 
+// ECMA 262 - 15.9.5.43
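+// ES5.1 specifies that toISOString throws a RangeError for an invalid
+// time value rather than returning an "Invalid Date" string.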
 function DateToISOString() {
   var t = DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) return kInvalidDate;
+  if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
   var year = this.getUTCFullYear();
   var year_string;
   if (year >= 0 && year <= 9999) {
@@ -1062,7 +1073,7 @@
 
   // Set up non-enumerable functions of the Date prototype object and
   // set their names.
-  InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
+  InstallFunctions($Date.prototype, DONT_ENUM, $Array(
     "toString", DateToString,
     "toDateString", DateToDateString,
     "toTimeString", DateToTimeString,
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index d254ee5..8cbe0b3 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -286,7 +286,7 @@
   copy.condition_ = this.condition_;
   copy.ignoreCount_ = this.ignoreCount_;
   return copy;
-}
+};
 
 
 ScriptBreakPoint.prototype.number = function() {
@@ -335,13 +335,13 @@
     locations.push(this.break_points_[i].actual_location);
   }
   return locations;
-}
+};
 
 
 ScriptBreakPoint.prototype.update_positions = function(line, column) {
   this.line_ = line;
   this.column_ = column;
-}
+};
 
 
 ScriptBreakPoint.prototype.hit_count = function() {
@@ -477,9 +477,10 @@
 // break points set in this script.
 function UpdateScriptBreakPoints(script) {
   for (var i = 0; i < script_break_points.length; i++) {
-    if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
-        script_break_points[i].matchesScript(script)) {
-      script_break_points[i].set(script);
+    var break_point = script_break_points[i];
+    if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName) &&
+        break_point.matchesScript(script)) {
+      break_point.set(script);
     }
   }
 }
@@ -585,7 +586,7 @@
   var script = %FunctionGetScript(func);
   var script_offset = %FunctionGetScriptSourcePosition(func);
   return script.locationFromLine(opt_line, opt_column, script_offset);
-}
+};
 
 
 // Returns the character position in a script based on a line number and an
@@ -593,7 +594,7 @@
 Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
   var location = script.locationFromLine(opt_line, opt_column);
   return location ? location.position : null;
-}
+};
 
 
 Debug.findBreakPoint = function(break_point_number, remove) {
@@ -627,7 +628,7 @@
     }
   }
   return [];
-}
+};
 
 Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
   if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
@@ -677,8 +678,9 @@
 {
   break_point = MakeBreakPoint(position);
   break_point.setCondition(condition);
-  if (!enabled)
+  if (!enabled) {
     break_point.disable();
+  }
   var scripts = this.scripts();
   for (var i = 0; i < scripts.length; i++) {
     if (script_id == scripts[i].id) {
@@ -771,7 +773,7 @@
     }
   }
   return script_break_point;
-}
+};
 
 
 // Sets a breakpoint in a script identified through id or name at the
@@ -799,7 +801,7 @@
   }
 
   return script_break_point.number();
-}
+};
 
 
 Debug.setScriptBreakPointById = function(script_id,
@@ -808,7 +810,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
                                   script_id, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-}
+};
 
 
 Debug.setScriptBreakPointByName = function(script_name,
@@ -817,7 +819,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
                                   script_name, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-}
+};
 
 
 Debug.setScriptBreakPointByRegExp = function(script_regexp,
@@ -826,7 +828,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
                                   script_regexp, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-}
+};
 
 
 Debug.enableScriptBreakPoint = function(break_point_number) {
@@ -841,13 +843,15 @@
 };
 
 
-Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
+Debug.changeScriptBreakPointCondition = function(
+    break_point_number, condition) {
   var script_break_point = this.findScriptBreakPoint(break_point_number, false);
   script_break_point.setCondition(condition);
 };
 
 
-Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
+Debug.changeScriptBreakPointIgnoreCount = function(
+    break_point_number, ignoreCount) {
   if (ignoreCount < 0) {
     throw new Error('Invalid argument');
   }
@@ -858,12 +862,12 @@
 
 Debug.scriptBreakPoints = function() {
   return script_break_points;
-}
+};
 
 
 Debug.clearStepping = function() {
   %ClearStepping();
-}
+};
 
 Debug.setBreakOnException = function() {
   return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
@@ -940,7 +944,7 @@
   var count = opt_count ? %ToNumber(opt_count) : 1;
 
   return %PrepareStep(this.break_id, action, count);
-}
+};
 
 ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
     opt_additional_context) {
@@ -960,8 +964,9 @@
 ExecutionState.prototype.frame = function(opt_index) {
   // If no index supplied return the selected frame.
   if (opt_index == null) opt_index = this.selected_frame;
-  if (opt_index < 0 || opt_index >= this.frameCount())
+  if (opt_index < 0 || opt_index >= this.frameCount()) {
     throw new Error('Illegal frame index.');
+  }
   return new FrameMirror(this.break_id, opt_index);
 };
 
@@ -1088,12 +1093,12 @@
 
 ExceptionEvent.prototype.exception = function() {
   return this.exception_;
-}
+};
 
 
 ExceptionEvent.prototype.uncaught = function() {
   return this.uncaught_;
-}
+};
 
 
 ExceptionEvent.prototype.func = function() {
@@ -1185,7 +1190,7 @@
   o.body.script = this.script_;
 
   return o.toJSONProtocol();
-}
+};
 
 
 function MakeNewFunctionEvent(func) {
@@ -1241,7 +1246,7 @@
   o.body = {};
   o.body.script = { id: this.id() };
   return o.toJSONProtocol();
-}
+};
 
 
 function MakeScriptObject_(script, include_source) {
@@ -1258,18 +1263,18 @@
     o.source = script.source();
   }
   return o;
-};
+}
 
 
 function DebugCommandProcessor(exec_state, opt_is_running) {
   this.exec_state_ = exec_state;
   this.running_ = opt_is_running || false;
-};
+}
 
 
 DebugCommandProcessor.prototype.processDebugRequest = function (request) {
   return this.processDebugJSONRequest(request);
-}
+};
 
 
 function ProtocolMessage(request) {
@@ -1297,13 +1302,13 @@
     this.options_ = {};
   }
   this.options_[name] = value;
-}
+};
 
 
 ProtocolMessage.prototype.failed = function(message) {
   this.success = false;
   this.message = message;
-}
+};
 
 
 ProtocolMessage.prototype.toJSONProtocol = function() {
@@ -1351,7 +1356,7 @@
   }
   json.running = this.running;
   return JSON.stringify(json);
-}
+};
 
 
 DebugCommandProcessor.prototype.createResponse = function(request) {
@@ -1359,7 +1364,8 @@
 };
 
 
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(
+    json_request) {
   var request;  // Current request.
   var response;  // Generated response.
   try {
@@ -1646,7 +1652,7 @@
 
   // Add the break point number to the response.
   response.body = { type: type,
-                    breakpoint: break_point_number }
+                    breakpoint: break_point_number };
 
   // Add break point information to the response.
   if (break_point instanceof ScriptBreakPoint) {
@@ -1660,7 +1666,8 @@
       response.body.type = 'scriptRegExp';
       response.body.script_regexp = break_point.script_regexp_object().source;
     } else {
-      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
+      throw new Error("Internal error: Unexpected breakpoint type: " +
+                      break_point.type());
     }
     response.body.line = break_point.line();
     response.body.column = break_point.column();
@@ -1672,7 +1679,8 @@
 };
 
 
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
+    request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1709,10 +1717,11 @@
   if (!IS_UNDEFINED(ignoreCount)) {
     Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
   }
-}
+};
 
 
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
+    request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1743,10 +1752,11 @@
 
   // Add the cleared break point numbers to the response.
   response.body = { breakpoints: cleared_break_points };
-}
+};
 
 
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
+    request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1766,11 +1776,12 @@
   Debug.clearBreakPoint(break_point);
 
   // Add the cleared break point number to the response.
-  response.body = { breakpoint: break_point }
-}
+  response.body = { breakpoint: break_point };
+};
 
 
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
+    request, response) {
   var array = [];
   for (var i = 0; i < script_break_points.length; i++) {
     var break_point = script_break_points[i];
@@ -1785,7 +1796,7 @@
       condition: break_point.condition(),
       ignoreCount: break_point.ignoreCount(),
       actual_locations: break_point.actual_locations()
-    }
+    };
 
     if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
       description.type = 'scriptId';
@@ -1797,7 +1808,8 @@
       description.type = 'scriptRegExp';
       description.script_regexp = break_point.script_regexp_object().source;
     } else {
-      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
+      throw new Error("Internal error: Unexpected breakpoint type: " +
+                      break_point.type());
     }
     array.push(description);
   }
@@ -1806,15 +1818,15 @@
     breakpoints: array,
     breakOnExceptions: Debug.isBreakOnException(),
     breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
-  }
-}
+  };
+};
 
 
 DebugCommandProcessor.prototype.disconnectRequest_ =
     function(request, response) {
   Debug.disableAllBreakPoints();
   this.continueRequest_(request, response);
-}
+};
 
 
 DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
@@ -1859,10 +1871,11 @@
 
   // Add the cleared break point number to the response.
   response.body = { 'type': type, 'enabled': enabled };
-}
+};
 
 
-DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.backtraceRequest_ = function(
+    request, response) {
   // Get the number of frames.
   var total_frames = this.exec_state_.frameCount();
 
@@ -1870,12 +1883,12 @@
   if (total_frames == 0) {
     response.body = {
       totalFrames: total_frames
-    }
+    };
     return;
   }
 
   // Default frame range to include in backtrace.
-  var from_index = 0
+  var from_index = 0;
   var to_index = kDefaultBacktraceLength;
 
   // Get the range from the arguments.
@@ -1888,7 +1901,7 @@
     }
     if (request.arguments.bottom) {
       var tmp_index = total_frames - from_index;
-      from_index = total_frames - to_index
+      from_index = total_frames - to_index;
       to_index = tmp_index;
     }
     if (from_index < 0 || to_index < 0) {
@@ -1914,7 +1927,7 @@
     toFrame: to_index,
     totalFrames: total_frames,
     frames: frames
-  }
+  };
 };
 
 
@@ -1938,8 +1951,8 @@
 
 
 DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
-  // Get the frame for which the scope or scopes are requested. With no frameNumber
-  // argument use the currently selected frame.
+  // Get the frame for which the scope or scopes are requested.
+  // With no frameNumber argument use the currently selected frame.
   if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
     frame_index = request.arguments.frameNumber;
     if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
@@ -1949,7 +1962,7 @@
   } else {
     return this.exec_state_.frame();
   }
-}
+};
 
 
 DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
@@ -1972,7 +1985,7 @@
     toScope: total_scopes,
     totalScopes: total_scopes,
     scopes: scopes
-  }
+  };
 };
 
 
@@ -2217,7 +2230,8 @@
     if (!IS_UNDEFINED(request.arguments.types)) {
       types = %ToNumber(request.arguments.types);
       if (isNaN(types) || types < 0) {
-        return response.failed('Invalid types "' + request.arguments.types + '"');
+        return response.failed('Invalid types "' +
+                               request.arguments.types + '"');
       }
     }
 
@@ -2286,7 +2300,7 @@
     var details = %GetThreadDetails(this.exec_state_.break_id, i);
     var thread_info = { current: details[0],
                         id: details[1]
-                      }
+                      };
     threads.push(thread_info);
   }
 
@@ -2294,7 +2308,7 @@
   response.body = {
     totalThreads: total_threads,
     threads: threads
-  }
+  };
 };
 
 
@@ -2306,7 +2320,7 @@
 DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
   response.body = {
     V8Version: %GetV8Version()
-  }
+  };
 };
 
 
@@ -2322,7 +2336,8 @@
 };
 
 
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
+DebugCommandProcessor.prototype.changeLiveRequest_ = function(
+    request, response) {
   if (!Debug.LiveEdit) {
     return response.failed('LiveEdit feature is not supported');
   }
@@ -2393,7 +2408,7 @@
       response.body.flags.push({ name: name, value: value });
     }
   }
-}
+};
 
 
 DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
@@ -2499,7 +2514,7 @@
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
   return this.running_;
-}
+};
 
 
 DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
@@ -2515,7 +2530,7 @@
     n = n >>> 4;
   }
   return r;
-};
+}
 
 
 /**
@@ -2591,7 +2606,7 @@
     case 'string':
     case 'number':
       json = value;
-      break
+      break;
 
     default:
       json = null;
diff --git a/src/debug.cc b/src/debug.cc
index 20cd802..c654dfb 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -40,6 +40,7 @@
 #include "global-handles.h"
 #include "ic.h"
 #include "ic-inl.h"
+#include "isolate-inl.h"
 #include "list.h"
 #include "messages.h"
 #include "natives.h"
@@ -86,19 +87,13 @@
 
 static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
   Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(
-      isolate,
-      isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
-      Code);
+  return isolate->stub_cache()->ComputeCallDebugBreak(argc, kind);
 }
 
 
 static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
   Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(
-      isolate,
-      isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
-      Code);
+  return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
 }
 
 
@@ -401,15 +396,15 @@
   // Step in can only be prepared if currently positioned on an IC call,
   // construct call or CallFunction stub call.
   Address target = rinfo()->target_address();
-  Handle<Code> code(Code::GetCodeFromTargetAddress(target));
-  if (code->is_call_stub() || code->is_keyed_call_stub()) {
+  Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+  if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
     // Step in through IC call is handled by the runtime system. Therefore make
     // sure that any current IC is cleared and the runtime system is
     // called. If the executing code has a debug break at the location change
     // the call in the original code as it is the code there that will be
     // executed in place of the debug break call.
-    Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
-                                                      code->kind());
+    Handle<Code> stub = ComputeCallDebugPrepareStepIn(
+        target_code->arguments_count(), target_code->kind());
     if (IsDebugBreak()) {
       original_rinfo()->set_target_address(stub->entry());
     } else {
@@ -419,7 +414,7 @@
 #ifdef DEBUG
     // All the following stuff is needed only for assertion checks so the code
     // is wrapped in ifdef.
-    Handle<Code> maybe_call_function_stub = code;
+    Handle<Code> maybe_call_function_stub = target_code;
     if (IsDebugBreak()) {
       Address original_target = original_rinfo()->target_address();
       maybe_call_function_stub =
@@ -436,8 +431,9 @@
     // Step in through CallFunction stub should also be prepared by caller of
     // this function (Debug::PrepareStep) which should flood target function
     // with breakpoints.
-    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
-           || is_call_function_stub);
+    ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+           target_code->is_inline_cache_stub() ||
+           is_call_function_stub);
 #endif
   }
 }
@@ -474,11 +470,11 @@
   RelocInfo::Mode mode = rmode();
   if (RelocInfo::IsCodeTarget(mode)) {
     Address target = rinfo()->target_address();
-    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+    Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
 
     // Patch the code to invoke the builtin debug break function matching the
     // calling convention used by the call site.
-    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
+    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
   }
 }
@@ -772,7 +768,7 @@
 
   // Execute the shared function in the debugger context.
   Handle<Context> context = isolate->global_context();
-  bool caught_exception = false;
+  bool caught_exception;
   Handle<JSFunction> function =
       factory->NewFunctionFromSharedFunctionInfo(function_info, context);
 
@@ -1103,14 +1099,13 @@
   Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
 
   // Call HandleBreakPointx.
-  bool caught_exception = false;
-  const int argc = 2;
-  Object** argv[argc] = {
-    break_id.location(),
-    reinterpret_cast<Object**>(break_point_object.location())
-  };
+  bool caught_exception;
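+  // TryCall now takes a Handle<Object> array instead of an array of raw
+  // Object** locations, which is simpler and less error-prone.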
+  Handle<Object> argv[] = { break_id, break_point_object };
   Handle<Object> result = Execution::TryCall(check_break_point,
-      isolate_->js_builtins_object(), argc, argv, &caught_exception);
+                                             isolate_->js_builtins_object(),
+                                             ARRAY_SIZE(argv),
+                                             argv,
+                                             &caught_exception);
 
   // If exception or non boolean result handle as not triggered
   if (caught_exception || !result->IsBoolean()) {
@@ -1575,7 +1570,7 @@
   if (code->kind() == Code::STUB) {
     ASSERT(code->major_key() == CodeStub::CallFunction);
     Handle<Code> result =
-        Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
+        Isolate::Current()->builtins()->CallFunctionStub_DebugBreak();
     return result;
   }
 
@@ -1726,11 +1721,221 @@
 }
 
 
+// Helper function to compile full code for debugging. This code will
+// have debug break slots and deoptimization information. Deoptimization
+// information is required in case an optimized version of this function
+// is still activated on the stack. It will also make sure that the full
+// code is compiled with the same flags as the previous version - that
+// is, flags which can change the generated code. The current method of
+// mapping from already compiled full code without debug break slots to
+// full code with debug break slots depends on the generated code being
+// otherwise exactly the same.
+static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
+                                        Handle<Code> current_code) {
+  ASSERT(!current_code->has_debug_break_slots());
+
+  CompilationInfo info(shared);
+  info.MarkCompilingForDebugging(current_code);
+  ASSERT(!info.shared_info()->is_compiled());
+  ASSERT(!info.isolate()->has_pending_exception());
+
+  // Use compile lazy which will end up compiling the full code in the
+  // configuration configured above.
+  bool result = Compiler::CompileLazy(&info);
+  ASSERT(result != Isolate::Current()->has_pending_exception());
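+  // A failed compile leaves a pending exception (checked by the ASSERT
+  // above); clear it so the debugger can proceed regardless.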
+  info.isolate()->clear_pending_exception();
+#if DEBUG
+  if (result) {
+    Handle<Code> new_code(shared->code());
+    ASSERT(new_code->has_debug_break_slots());
+    ASSERT(current_code->is_compiled_optimizable() ==
+           new_code->is_compiled_optimizable());
+    ASSERT(current_code->instruction_size() <= new_code->instruction_size());
+  }
+#endif
+  return result;
+}
+
+
 void Debug::PrepareForBreakPoints() {
   // If preparing for the first break point make sure to deoptimize all
   // functions as debugging does not work with optimized code.
   if (!has_break_points_) {
     Deoptimizer::DeoptimizeAll();
+
+    Handle<Code> lazy_compile =
+        Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+
+    // Keep the list of activated functions in a handlified list as it
+    // is used both in GC and non-GC code.
+    List<Handle<JSFunction> > active_functions(100);
+
+    {
+      // We are going to iterate heap to find all functions without
+      // debug break slots.
+      isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+
+      // Ensure no GC in this scope as we are comparing raw pointer
+      // values and performing a heap iteration.
+      AssertNoAllocation no_allocation;
+
+      // Find all non-optimized code functions with activation frames
+      // on the stack. This includes functions which have optimized
+      // activations (including inlined functions) on the stack as the
+      // non-optimized code is needed for the lazy deoptimization.
+      for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
+        JavaScriptFrame* frame = it.frame();
+        if (frame->is_optimized()) {
+          List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+          frame->GetFunctions(&functions);
+          for (int i = 0; i < functions.length(); i++) {
+            if (!functions[i]->shared()->code()->has_debug_break_slots()) {
+              active_functions.Add(Handle<JSFunction>(functions[i]));
+            }
+          }
+        } else if (frame->function()->IsJSFunction()) {
+          JSFunction* function = JSFunction::cast(frame->function());
+          if (function->code()->kind() == Code::FUNCTION &&
+              !function->code()->has_debug_break_slots()) {
+            active_functions.Add(Handle<JSFunction>(function));
+          }
+        }
+      }
+
+      // Sort the functions on the object pointer value to prepare for
+      // the binary search below.
+      active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
+
+      // Scan the heap for all non-optimized functions which have no
+      // debug break slots.
+      HeapIterator iterator;
+      HeapObject* obj = NULL;
+      while (((obj = iterator.next()) != NULL)) {
+        if (obj->IsJSFunction()) {
+          JSFunction* function = JSFunction::cast(obj);
+          if (function->shared()->allows_lazy_compilation() &&
+              function->shared()->script()->IsScript() &&
+              function->code()->kind() == Code::FUNCTION &&
+              !function->code()->has_debug_break_slots()) {
+            bool has_activation =
+                SortedListBSearch<Handle<JSFunction> >(
+                    active_functions,
+                    Handle<JSFunction>(function),
+                    HandleObjectPointerCompare<JSFunction>) != -1;
+            if (!has_activation) {
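+              // Reset such functions to the lazy-compile stub so that
+              // their next invocation compiles fresh code with debug
+              // break slots.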
+              function->set_code(*lazy_compile);
+              function->shared()->set_code(*lazy_compile);
+            }
+          }
+        }
+      }
+    }
+
+    // Now the non-GC scope has been left, and the sorting of the
+    // functions in active_functions is no longer guaranteed. The code
+    // below does not rely on it.
+
+    // Now recompile all functions with activation frames and patch the
+    // return address to run in the new compiled code.
+    for (int i = 0; i < active_functions.length(); i++) {
+      Handle<JSFunction> function = active_functions[i];
+      Handle<SharedFunctionInfo> shared(function->shared());
+      // If recompilation is not possible just skip it.
+      if (shared->is_toplevel() ||
+          !shared->allows_lazy_compilation() ||
+          shared->code()->kind() == Code::BUILTIN) {
+        continue;
+      }
+
+      // Make sure that the shared full code is compiled with debug
+      // break slots.
+      if (function->code() == *lazy_compile) {
+        function->set_code(shared->code());
+      }
+      Handle<Code> current_code(function->code());
+      if (shared->code()->has_debug_break_slots()) {
+        // If the code has already been recompiled to have break slots,
+        // skip recompilation.
+        ASSERT(!function->code()->has_debug_break_slots());
+      } else {
+        // Try to compile the full code with debug break slots. If it
+        // fails just keep the current code.
+        ASSERT(shared->code() == *current_code);
+        ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
+        shared->set_code(*lazy_compile);
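+        // Force the debugger to appear active while compiling; presumably
+        // this ensures the generated full code gets debug break slots.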
+        bool prev_force_debugger_active =
+            isolate_->debugger()->force_debugger_active();
+        isolate_->debugger()->set_force_debugger_active(true);
+        CompileFullCodeForDebugging(shared, current_code);
+        isolate_->debugger()->set_force_debugger_active(
+            prev_force_debugger_active);
+        if (!shared->is_compiled()) {
+          shared->set_code(*current_code);
+          continue;
+        }
+      }
+      Handle<Code> new_code(shared->code());
+
+      // Find the function and patch the return address.
+      for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
+        JavaScriptFrame* frame = it.frame();
+        // If the current frame is for this function in its
+        // non-optimized form rewrite the return address to continue
+        // in the newly compiled full code with debug break slots.
+        if (frame->function()->IsJSFunction() &&
+            frame->function() == *function &&
+            frame->LookupCode()->kind() == Code::FUNCTION) {
+          intptr_t delta = frame->pc() - current_code->instruction_start();
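+          // The new code is byte-identical to the old code except for the
+          // inserted debug break slots, so the return address moves
+          // forward by kDebugBreakSlotLength for each slot preceding it.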
+          int debug_break_slot_count = 0;
+          int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+          for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
+            // Check if the pc in the new code with debug break
+            // slots is before this slot.
+            RelocInfo* info = it.rinfo();
+            int debug_break_slot_bytes =
+                debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+            intptr_t new_delta =
+                info->pc() -
+                new_code->instruction_start() -
+                debug_break_slot_bytes;
+            if (new_delta > delta) {
+              break;
+            }
+
+            // Passed a debug break slot in the full code with debug
+            // break slots.
+            debug_break_slot_count++;
+          }
+          int debug_break_slot_bytes =
+              debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+          if (FLAG_trace_deopt) {
+            PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+                   "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+                   "for debugging, "
+                   "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
+                   reinterpret_cast<intptr_t>(
+                       current_code->instruction_start()),
+                   reinterpret_cast<intptr_t>(
+                       current_code->instruction_start()) +
+                       current_code->instruction_size(),
+                   current_code->instruction_size(),
+                   reinterpret_cast<intptr_t>(new_code->instruction_start()),
+                   reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+                       new_code->instruction_size(),
+                   new_code->instruction_size(),
+                   reinterpret_cast<intptr_t>(frame->pc()),
+                   reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+                       delta + debug_break_slot_bytes);
+          }
+
+          // Patch the return address to return into the code with
+          // debug break slots.
+          frame->set_pc(
+              new_code->instruction_start() + delta + debug_break_slot_bytes);
+        }
+      }
+    }
   }
 }
 
@@ -1744,7 +1949,9 @@
   }
 
   // Ensure shared is compiled. Return false if this failed.
-  if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+  if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+    return false;
+  }
 
   // Create the debug info object.
   Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
@@ -1959,9 +2166,10 @@
 
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which are no longer referenced.
-  heap->CollectAllGarbage(false);
-  heap->CollectAllGarbage(false);
+  // scripts which are no longer referenced.  The second also sweeps precisely,
+  // which saves us doing yet another GC to make the heap iterable.
+  heap->CollectAllGarbage(Heap::kNoGCFlags);
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   ASSERT(script_cache_ == NULL);
   script_cache_ = new ScriptCache();
@@ -1969,6 +2177,8 @@
   // Scan heap for Script objects.
   int count = 0;
   HeapIterator iterator;
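+  // Assert that the scan below performs no allocation (and hence no GC).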
+  AssertNoAllocation no_allocation;
+
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
       script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2009,7 +2219,7 @@
 
   // Perform GC to get unreferenced scripts evicted from the cache before
   // returning the content.
-  isolate_->heap()->CollectAllGarbage(false);
+  isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Get the scripts from the cache.
   return script_cache_->GetScripts();
@@ -2031,6 +2241,7 @@
       compiling_natives_(false),
       is_loading_debugger_(false),
       never_unload_debugger_(false),
+      force_debugger_active_(false),
       message_handler_(NULL),
       debugger_unload_pending_(false),
       host_dispatch_handler_(NULL),
@@ -2055,7 +2266,8 @@
 
 
 Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
-                                      int argc, Object*** argv,
+                                      int argc,
+                                      Handle<Object> argv[],
                                       bool* caught_exception) {
   ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
 
@@ -2072,7 +2284,9 @@
   Handle<Object> js_object = Execution::TryCall(
       Handle<JSFunction>::cast(constructor),
       Handle<JSObject>(isolate_->debug()->debug_context()->global()),
-      argc, argv, caught_exception);
+      argc,
+      argv,
+      caught_exception);
   return js_object;
 }
 
@@ -2081,10 +2295,11 @@
   // Create the execution state object.
   Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
       isolate_->debug()->break_id());
-  const int argc = 1;
-  Object** argv[argc] = { break_id.location() };
+  Handle<Object> argv[] = { break_id };
   return MakeJSObject(CStrVector("MakeExecutionState"),
-                      argc, argv, caught_exception);
+                      ARRAY_SIZE(argv),
+                      argv,
+                      caught_exception);
 }
 
 
@@ -2092,11 +2307,9 @@
                                         Handle<Object> break_points_hit,
                                         bool* caught_exception) {
   // Create the new break event object.
-  const int argc = 2;
-  Object** argv[argc] = { exec_state.location(),
-                          break_points_hit.location() };
+  Handle<Object> argv[] = { exec_state, break_points_hit };
   return MakeJSObject(CStrVector("MakeBreakEvent"),
-                      argc,
+                      ARRAY_SIZE(argv),
                       argv,
                       caught_exception);
 }
@@ -2108,23 +2321,24 @@
                                             bool* caught_exception) {
   Factory* factory = isolate_->factory();
   // Create the new exception event object.
-  const int argc = 3;
-  Object** argv[argc] = { exec_state.location(),
-                          exception.location(),
-                          uncaught ? factory->true_value().location() :
-                                     factory->false_value().location()};
+  Handle<Object> argv[] = { exec_state,
+                            exception,
+                            factory->ToBoolean(uncaught) };
   return MakeJSObject(CStrVector("MakeExceptionEvent"),
-                      argc, argv, caught_exception);
+                      ARRAY_SIZE(argv),
+                      argv,
+                      caught_exception);
 }
 
 
 Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
                                               bool* caught_exception) {
   // Create the new function event object.
-  const int argc = 1;
-  Object** argv[argc] = { function.location() };
+  Handle<Object> argv[] = { function };
   return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
-                      argc, argv, caught_exception);
+                      ARRAY_SIZE(argv),
+                      argv,
+                      caught_exception);
 }
 
 
@@ -2135,14 +2349,11 @@
   // Create the compile event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> script_wrapper = GetScriptWrapper(script);
-  const int argc = 3;
-  Object** argv[argc] = { exec_state.location(),
-                          script_wrapper.location(),
-                          before ? factory->true_value().location() :
-                                   factory->false_value().location() };
-
+  Handle<Object> argv[] = { exec_state,
+                            script_wrapper,
+                            factory->ToBoolean(before) };
   return MakeJSObject(CStrVector("MakeCompileEvent"),
-                      argc,
+                      ARRAY_SIZE(argv),
                       argv,
                       caught_exception);
 }
@@ -2153,11 +2364,10 @@
   // Create the script collected event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
-  const int argc = 2;
-  Object** argv[argc] = { exec_state.location(), id_object.location() };
+  Handle<Object> argv[] = { exec_state, id_object };
 
   return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
-                      argc,
+                      ARRAY_SIZE(argv),
                       argv,
                       caught_exception);
 }
@@ -2307,12 +2517,13 @@
   Handle<JSValue> wrapper = GetScriptWrapper(script);
 
   // Call UpdateScriptBreakPoints expect no exceptions.
-  bool caught_exception = false;
-  const int argc = 1;
-  Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
+  bool caught_exception;
+  Handle<Object> argv[] = { wrapper };
   Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
-      Isolate::Current()->js_builtins_object(), argc, argv,
-      &caught_exception);
+                     Isolate::Current()->js_builtins_object(),
+                     ARRAY_SIZE(argv),
+                     argv,
+                     &caught_exception);
   if (caught_exception) {
     return;
   }
@@ -2425,7 +2636,8 @@
                                   v8::Debug::ClientData* client_data) {
   Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
   v8::Debug::EventCallback2 callback =
-      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
+      FUNCTION_CAST<v8::Debug::EventCallback2>(
+          callback_obj->foreign_address());
   EventDetailsImpl event_details(
       event,
       Handle<JSObject>::cast(exec_state),
@@ -2443,13 +2655,16 @@
   Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
 
   // Invoke the JavaScript debug event listener.
-  const int argc = 4;
-  Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
-                          exec_state.location(),
-                          Handle<Object>::cast(event_data).location(),
-                          event_listener_data_.location() };
-  bool caught_exception = false;
-  Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
+  Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
+                            exec_state,
+                            event_data,
+                            event_listener_data_ };
+  bool caught_exception;
+  Execution::TryCall(fun,
+                     isolate_->global(),
+                     ARRAY_SIZE(argv),
+                     argv,
+                     &caught_exception);
   // Silently ignore exceptions from debug event listeners.
 }
 
@@ -2795,7 +3010,9 @@
 bool Debugger::IsDebuggerActive() {
   ScopedLock with(debugger_access_);
 
-  return message_handler_ != NULL || !event_listener_.is_null();
+  return message_handler_ != NULL ||
+      !event_listener_.is_null() ||
+      force_debugger_active_;
 }
 
 
@@ -2818,12 +3035,11 @@
     return isolate_->factory()->undefined_value();
   }
 
-  static const int kArgc = 2;
-  Object** argv[kArgc] = { exec_state.location(), data.location() };
+  Handle<Object> argv[] = { exec_state, data };
   Handle<Object> result = Execution::Call(
       fun,
       Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
-      kArgc,
+      ARRAY_SIZE(argv),
       argv,
       pending_exception);
   return result;
@@ -2891,6 +3107,94 @@
 }
 
 
+EnterDebugger::EnterDebugger()
+    : isolate_(Isolate::Current()),
+      prev_(isolate_->debug()->debugger_entry()),
+      it_(isolate_),
+      has_js_frames_(!it_.done()),
+      save_(isolate_) {
+  Debug* debug = isolate_->debug();
+  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+
+  // Link recursive debugger entry.
+  debug->set_debugger_entry(this);
+
+  // Store the previous break id and frame id.
+  break_id_ = debug->break_id();
+  break_frame_id_ = debug->break_frame_id();
+
+  // Create the new break info. If there are no JavaScript frames there is no
+  // break frame id.
+  if (has_js_frames_) {
+    debug->NewBreak(it_.frame()->id());
+  } else {
+    debug->NewBreak(StackFrame::NO_ID);
+  }
+
+  // Make sure that debugger is loaded and enter the debugger context.
+  load_failed_ = !debug->Load();
+  if (!load_failed_) {
+    // NOTE: the member variable save_ saves the previous context before
+    // this change.
+    isolate_->set_context(*debug->debug_context());
+  }
+}
+
+
+EnterDebugger::~EnterDebugger() {
+  ASSERT(Isolate::Current() == isolate_);
+  Debug* debug = isolate_->debug();
+
+  // Restore to the previous break state.
+  debug->SetBreak(break_frame_id_, break_id_);
+
+  // Check for leaving the debugger.
+  if (prev_ == NULL) {
+    // Clear mirror cache when leaving the debugger. Skip this if there is a
+    // pending exception as clearing the mirror cache calls back into
+    // JavaScript. This can happen if v8::Debug::Call is used, in which
+    // case the exception should end up in the calling code.
+    if (!isolate_->has_pending_exception()) {
+      // Try to avoid any pending debug break breaking in the clear mirror
+      // cache JavaScript code.
+      if (isolate_->stack_guard()->IsDebugBreak()) {
+        debug->set_interrupts_pending(DEBUGBREAK);
+        isolate_->stack_guard()->Continue(DEBUGBREAK);
+      }
+      debug->ClearMirrorCache();
+    }
+
+    // Request preemption and debug break when leaving the last debugger entry
+    // if any of these were recorded while debugging.
+    if (debug->is_interrupt_pending(PREEMPT)) {
+      // This re-scheduling of preemption is to avoid starvation in some
+      // debugging scenarios.
+      debug->clear_interrupt_pending(PREEMPT);
+      isolate_->stack_guard()->Preempt();
+    }
+    if (debug->is_interrupt_pending(DEBUGBREAK)) {
+      debug->clear_interrupt_pending(DEBUGBREAK);
+      isolate_->stack_guard()->DebugBreak();
+    }
+
+    // If there are commands in the queue when leaving the debugger, request
+    // that these commands are processed.
+    if (isolate_->debugger()->HasCommands()) {
+      isolate_->stack_guard()->DebugCommand();
+    }
+
+    // If the debugger is no longer active when leaving it, unload it.
+    if (!isolate_->debugger()->IsDebuggerActive()) {
+      isolate_->debugger()->UnloadDebugger();
+    }
+  }
+
+  // Leaving this debugger entry.
+  debug->set_debugger_entry(prev_);
+}
+
+
 MessageImpl MessageImpl::NewEvent(DebugEvent event,
                                   bool running,
                                   Handle<JSObject> exec_state,
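
EnterDebugger, moved out of the header above, is a classic RAII scope guard: the constructor links itself into the isolate's chain of debugger entries and the destructor restores the previous link, so nested entries unwind like a stack. A stripped-down sketch of that link/unlink discipline (illustrative names, not V8's types):

    #include <cassert>

    // Minimal sketch of the EnterDebugger link/unlink pattern: each scope
    // records the previous innermost entry and restores it on destruction.
    struct DebuggerState {
      struct Entry;
      Entry* top = nullptr;  // innermost active entry
    };

    struct DebuggerState::Entry {
      explicit Entry(DebuggerState* state) : state_(state), prev_(state->top) {
        state_->top = this;  // link this entry on construction
      }
      ~Entry() {
        assert(state_->top == this);
        state_->top = prev_;  // unlink, restoring the previous entry
      }
      DebuggerState* state_;
      Entry* prev_;
    };

    int main() {
      DebuggerState state;
      {
        DebuggerState::Entry outer(&state);
        { DebuggerState::Entry inner(&state); }  // inner unlinks first
        assert(state.top == &outer);
      }
      return state.top == nullptr ? 0 : 1;  // 0: chain fully restored
    }
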
diff --git a/src/debug.h b/src/debug.h
index a5083eb..a39d801 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -178,9 +178,7 @@
 
  private:
   // Calculate the hash value from the key (script id).
-  static uint32_t Hash(int key) {
-    return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
-  }
+  static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
 
   // Scripts match if their keys (script id) match.
   static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
@@ -404,7 +402,7 @@
   static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
   static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
   static void GenerateReturnDebugBreak(MacroAssembler* masm);
-  static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+  static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
   static void GenerateSlotDebugBreak(MacroAssembler* masm);
   static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
 
@@ -707,7 +705,8 @@
   void DebugRequest(const uint16_t* json_request, int length);
 
   Handle<Object> MakeJSObject(Vector<const char> constructor_name,
-                              int argc, Object*** argv,
+                              int argc,
+                              Handle<Object> argv[],
                               bool* caught_exception);
   Handle<Object> MakeExecutionState(bool* caught_exception);
   Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@@ -811,11 +810,15 @@
   }
 
   void set_compiling_natives(bool compiling_natives) {
-    Debugger::compiling_natives_ = compiling_natives;
+    compiling_natives_ = compiling_natives;
   }
   bool compiling_natives() const { return compiling_natives_; }
   void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
   bool is_loading_debugger() const { return is_loading_debugger_; }
+  void set_force_debugger_active(bool force_debugger_active) {
+    force_debugger_active_ = force_debugger_active;
+  }
+  bool force_debugger_active() const { return force_debugger_active_; }
 
   bool IsDebuggerActive();
 
@@ -841,6 +844,7 @@
   bool compiling_natives_;  // Are we compiling natives?
   bool is_loading_debugger_;  // Are we loading the debugger?
   bool never_unload_debugger_;  // Can we unload the debugger?
+  bool force_debugger_active_;  // Activate debugger without event listeners.
   v8::Debug::MessageHandler2 message_handler_;
   bool debugger_unload_pending_;  // Was message handler cleared?
   v8::Debug::HostDispatchHandler host_dispatch_handler_;
@@ -871,91 +875,8 @@
 // some reason could not be entered FailedToEnter will return true.
 class EnterDebugger BASE_EMBEDDED {
  public:
-  EnterDebugger()
-      : isolate_(Isolate::Current()),
-        prev_(isolate_->debug()->debugger_entry()),
-        it_(isolate_),
-        has_js_frames_(!it_.done()),
-        save_(isolate_) {
-    Debug* debug = isolate_->debug();
-    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
-    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
-    // Link recursive debugger entry.
-    debug->set_debugger_entry(this);
-
-    // Store the previous break id and frame id.
-    break_id_ = debug->break_id();
-    break_frame_id_ = debug->break_frame_id();
-
-    // Create the new break info. If there is no JavaScript frames there is no
-    // break frame id.
-    if (has_js_frames_) {
-      debug->NewBreak(it_.frame()->id());
-    } else {
-      debug->NewBreak(StackFrame::NO_ID);
-    }
-
-    // Make sure that debugger is loaded and enter the debugger context.
-    load_failed_ = !debug->Load();
-    if (!load_failed_) {
-      // NOTE the member variable save which saves the previous context before
-      // this change.
-      isolate_->set_context(*debug->debug_context());
-    }
-  }
-
-  ~EnterDebugger() {
-    ASSERT(Isolate::Current() == isolate_);
-    Debug* debug = isolate_->debug();
-
-    // Restore to the previous break state.
-    debug->SetBreak(break_frame_id_, break_id_);
-
-    // Check for leaving the debugger.
-    if (prev_ == NULL) {
-      // Clear mirror cache when leaving the debugger. Skip this if there is a
-      // pending exception as clearing the mirror cache calls back into
-      // JavaScript. This can happen if the v8::Debug::Call is used in which
-      // case the exception should end up in the calling code.
-      if (!isolate_->has_pending_exception()) {
-        // Try to avoid any pending debug break breaking in the clear mirror
-        // cache JavaScript code.
-        if (isolate_->stack_guard()->IsDebugBreak()) {
-          debug->set_interrupts_pending(DEBUGBREAK);
-          isolate_->stack_guard()->Continue(DEBUGBREAK);
-        }
-        debug->ClearMirrorCache();
-      }
-
-      // Request preemption and debug break when leaving the last debugger entry
-      // if any of these where recorded while debugging.
-      if (debug->is_interrupt_pending(PREEMPT)) {
-        // This re-scheduling of preemption is to avoid starvation in some
-        // debugging scenarios.
-        debug->clear_interrupt_pending(PREEMPT);
-        isolate_->stack_guard()->Preempt();
-      }
-      if (debug->is_interrupt_pending(DEBUGBREAK)) {
-        debug->clear_interrupt_pending(DEBUGBREAK);
-        isolate_->stack_guard()->DebugBreak();
-      }
-
-      // If there are commands in the queue when leaving the debugger request
-      // that these commands are processed.
-      if (isolate_->debugger()->HasCommands()) {
-        isolate_->stack_guard()->DebugCommand();
-      }
-
-      // If leaving the debugger with the debugger no longer active unload it.
-      if (!isolate_->debugger()->IsDebuggerActive()) {
-        isolate_->debugger()->UnloadDebugger();
-      }
-    }
-
-    // Leaving this debugger entry.
-    debug->set_debugger_entry(prev_);
-  }
+  EnterDebugger();
+  ~EnterDebugger();
 
   // Check whether the debugger could be entered.
   inline bool FailedToEnter() { return load_failed_; }
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 0ada28b..cb24b10 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -52,11 +52,13 @@
 
 DeoptimizerData::~DeoptimizerData() {
   if (eager_deoptimization_entry_code_ != NULL) {
-    eager_deoptimization_entry_code_->Free(EXECUTABLE);
+    Isolate::Current()->memory_allocator()->Free(
+        eager_deoptimization_entry_code_);
     eager_deoptimization_entry_code_ = NULL;
   }
   if (lazy_deoptimization_entry_code_ != NULL) {
-    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+    Isolate::Current()->memory_allocator()->Free(
+        lazy_deoptimization_entry_code_);
     lazy_deoptimization_entry_code_ = NULL;
   }
 }
@@ -71,6 +73,8 @@
 #endif
 
 
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
 Deoptimizer* Deoptimizer::New(JSFunction* function,
                               BailoutType type,
                               unsigned bailout_id,
@@ -260,11 +264,16 @@
   AssertNoAllocation no_allocation;
 
   // Run through the list of all global contexts and deoptimize.
-  Object* global = Isolate::Current()->heap()->global_contexts_list();
-  while (!global->IsUndefined()) {
-    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
-                                              visitor);
-    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
+  Object* context = Isolate::Current()->heap()->global_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the global field of the context can be undefined.
+    Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
+    if (!global->IsUndefined()) {
+      VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
+                                                visitor);
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
 
@@ -305,6 +314,8 @@
       input_(NULL),
       output_count_(0),
       output_(NULL),
+      frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
+      has_alignment_padding_(0),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
     if (type == DEBUGGER) {
@@ -329,6 +340,26 @@
   if (type == EAGER) {
     ASSERT(from == NULL);
     optimized_code_ = function_->code();
+    if (FLAG_trace_deopt && FLAG_code_comments) {
+      // Print instruction associated with this bailout.
+      const char* last_comment = NULL;
+      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+        RelocInfo* info = it.rinfo();
+        if (info->rmode() == RelocInfo::COMMENT) {
+          last_comment = reinterpret_cast<const char*>(info->data());
+        }
+        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
+          unsigned id = Deoptimizer::GetDeoptimizationId(
+              info->target_address(), Deoptimizer::EAGER);
+          if (id == bailout_id && last_comment != NULL) {
+            PrintF("            %s\n", last_comment);
+            break;
+          }
+        }
+      }
+    }
   } else if (type == LAZY) {
     optimized_code_ = FindDeoptimizingCodeFromAddress(from);
     ASSERT(optimized_code_ != NULL);
@@ -372,7 +403,7 @@
 Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
   ASSERT(id >= 0);
   if (id >= kNumberOfEntries) return NULL;
-  LargeObjectChunk* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -386,12 +417,12 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
+      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  LargeObjectChunk* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
@@ -399,14 +430,14 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->GetStartAddress() ||
-      addr >= base->GetStartAddress() +
+      addr < base->area_start() ||
+      addr >= base->area_start() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
-  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
+      static_cast<int>(addr - base->area_start()) % table_entry_size_);
+  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
 }
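
GetDeoptimizationEntry and GetDeoptimizationId above are inverses over a table of fixed-size stubs laid out back to back from the chunk's area_start(). A small model of that arithmetic; the entry size and count below are assumptions for the demo, not V8's configured values:

    #include <cassert>
    #include <cstdint>

    // id -> address and address -> id over a contiguous fixed-stride table.
    const intptr_t kTableEntrySizeDemo = 10;  // bytes per entry (assumed)
    const int kNumberOfEntriesDemo = 4096;    // table length (assumed)

    intptr_t EntryAddress(intptr_t area_start, int id) {
      assert(0 <= id && id < kNumberOfEntriesDemo);
      return area_start + id * kTableEntrySizeDemo;
    }

    int EntryId(intptr_t area_start, intptr_t addr) {
      intptr_t offset = addr - area_start;
      assert(offset >= 0 && offset % kTableEntrySizeDemo == 0);
      return static_cast<int>(offset / kTableEntrySizeDemo);
    }

    int main() {
      intptr_t base = 0x10000;
      return EntryId(base, EntryAddress(base, 42)) == 42 ? 0 : 1;
    }
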
 
 
@@ -448,6 +479,8 @@
 }
 
 
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
 void Deoptimizer::DoComputeOutputFrames() {
   if (bailout_type_ == OSR) {
     DoComputeOsrOutputFrame();
@@ -599,11 +632,13 @@
       intptr_t input_value = input_->GetRegister(input_reg);
       if (FLAG_trace_deopt) {
         PrintF(
-            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
+            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
             output_[frame_index]->GetTop() + output_offset,
             output_offset,
             input_value,
             converter.NameOfCPURegister(input_reg));
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -661,10 +696,12 @@
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
                output_offset,
                input_value,
                input_offset);
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -836,10 +873,12 @@
       unsigned output_offset =
           output->GetOffsetFromSlotIndex(this, output_index);
       if (FLAG_trace_osr) {
-        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
+        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
                output_offset,
                input_value,
                *input_offset);
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output->SetFrameSlot(output_offset, input_value);
       break;
@@ -939,7 +978,10 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
+    PatchStackCheckCodeAt(unoptimized_code,
+                          pc_after,
+                          check_code,
+                          replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -958,7 +1000,10 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
+    RevertStackCheckCodeAt(unoptimized_code,
+                           pc_after,
+                           check_code,
+                           replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -1025,7 +1070,7 @@
 }
 
 
-LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
+MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
@@ -1039,12 +1084,16 @@
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  MemoryChunk* chunk =
+      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+                                                            EXECUTABLE,
+                                                            NULL);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
+  ASSERT(chunk->area_size() >= desc.instr_size);
-  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
   return chunk;
 }
 
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 8641261..284676c 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -86,8 +86,8 @@
 #endif
 
  private:
-  LargeObjectChunk* eager_deoptimization_entry_code_;
-  LargeObjectChunk* lazy_deoptimization_entry_code_;
+  MemoryChunk* eager_deoptimization_entry_code_;
+  MemoryChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -173,7 +173,8 @@
 
   // Patch stack guard check at instruction before pc_after in
   // the unoptimized code to unconditionally call replacement_code.
-  static void PatchStackCheckCodeAt(Address pc_after,
+  static void PatchStackCheckCodeAt(Code* unoptimized_code,
+                                    Address pc_after,
                                     Code* check_code,
                                     Code* replacement_code);
 
@@ -185,7 +186,8 @@
 
   // Change all patched stack guard checks in the unoptimized code
   // back to a normal stack guard check.
-  static void RevertStackCheckCodeAt(Address pc_after,
+  static void RevertStackCheckCodeAt(Code* unoptimized_code,
+                                     Address pc_after,
                                      Code* check_code,
                                      Code* replacement_code);
 
@@ -211,6 +213,11 @@
     return OFFSET_OF(Deoptimizer, output_count_);
   }
   static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+  static int frame_alignment_marker_offset() {
+    return OFFSET_OF(Deoptimizer, frame_alignment_marker_);
+  }
+  static int has_alignment_padding_offset() {
+    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+  }
 
   static int GetDeoptimizedCodeCount(Isolate* isolate);
 
@@ -285,7 +292,7 @@
 
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static LargeObjectChunk* CreateCode(BailoutType type);
+  static MemoryChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -315,6 +322,10 @@
   // Array of output frame descriptions.
   FrameDescription** output_;
 
+  // Frames can be dynamically padded on ia32 to align untagged doubles.
+  Object* frame_alignment_marker_;
+  intptr_t has_alignment_padding_;
+
   List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
   static const int table_entry_size_;
@@ -358,7 +369,20 @@
   }
 
   double GetDoubleFrameSlot(unsigned offset) {
-    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+    intptr_t* ptr = GetFrameSlotPointer(offset);
+#if V8_TARGET_ARCH_MIPS
+    // Prevent gcc from using load-double (mips ldc1) on (possibly)
+    // non-64-bit aligned double. Uses two lwc1 instructions.
+    union conversion {
+      double d;
+      uint32_t u[2];
+    } c;
+    c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
+    c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
+    return c.d;
+#else
+    return *reinterpret_cast<double*>(ptr);
+#endif
   }
 
   void SetFrameSlot(unsigned offset, intptr_t value) {
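
The MIPS branch of GetDoubleFrameSlot above reads the double as two 32-bit halves because a frame slot is only guaranteed 4-byte alignment and a single ldc1 load-double would fault. The same effect, sketched portably with memcpy (demo code under that assumption, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Read a double from 4-byte-aligned storage without a load-double.
    double ReadUnalignedDouble(const unsigned char* p) {
      union {
        double d;
        uint32_t u[2];
      } c;
      memcpy(&c.u[0], p, 4);      // first 32-bit half
      memcpy(&c.u[1], p + 4, 4);  // second 32-bit half
      return c.d;
    }

    int main() {
      unsigned char buffer[12];
      double value = 3.25;
      memcpy(buffer + 4, &value, sizeof(value));       // only 4-byte aligned
      printf("%f\n", ReadUnalignedDouble(buffer + 4)); // 3.250000
      return 0;
    }
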
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 1e67b4c..e3b40ab 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -200,7 +200,7 @@
     // Print all the reloc info for this instruction which are not comments.
     for (int i = 0; i < pcs.length(); i++) {
       // Put together the reloc info
-      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
+      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
 
       // Indent the printing of the reloc info.
       if (i == 0) {
diff --git a/src/double.h b/src/double.h
index 65eded9..16a3245 100644
--- a/src/double.h
+++ b/src/double.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,8 +34,8 @@
 namespace internal {
 
 // We assume that doubles and uint64_t have the same endianness.
-static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
 
 // Helper functions for doubles.
 class Double {
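
double_to_uint64 and uint64_to_double above lean on BitCast, which reinterprets the bits of one type as another of the same size; switching them from static to inline avoids a private copy (and unused-function warnings) in every translation unit that includes the header. A sketch of the BitCast idiom, with BitCastDemo standing in for V8's BitCast:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // memcpy between equal-sized types reinterprets the bits without
    // violating strict-aliasing rules.
    template <typename Dest, typename Source>
    Dest BitCastDemo(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
      Dest dest;
      memcpy(&dest, &source, sizeof(dest));
      return dest;
    }

    int main() {
      uint64_t bits = BitCastDemo<uint64_t>(1.0);
      printf("%llx\n", (unsigned long long) bits);  // 3ff0000000000000
      printf("%f\n", BitCastDemo<double>(bits));    // 1.000000
      return 0;
    }
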
diff --git a/src/dtoa.h b/src/dtoa.h
index b3e79af..a2d6fde 100644
--- a/src/dtoa.h
+++ b/src/dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,7 +47,7 @@
 // The maximal length of digits a double can have in base 10.
 // Note that DoubleToAscii null-terminates its input. So the given buffer should
 // be at least kBase10MaximalLength + 1 characters long.
-static const int kBase10MaximalLength = 17;
+const int kBase10MaximalLength = 17;
 
 // Converts the given double 'v' to ascii.
 // The result should be interpreted as buffer * 10^(point-length).
diff --git a/src/elements.cc b/src/elements.cc
index 0454644..49ecd88 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -31,6 +31,30 @@
 #include "elements.h"
 #include "utils.h"
 
+
+// Each concrete ElementsAccessor can handle exactly one ElementsKind;
+// several abstract ElementsAccessor classes are used to allow sharing
+// common code.
+//
+// Inheritance hierarchy:
+// - ElementsAccessorBase                        (abstract)
+//   - FastElementsAccessor                      (abstract)
+//     - FastObjectElementsAccessor
+//     - FastDoubleElementsAccessor
+//   - ExternalElementsAccessor                  (abstract)
+//     - ExternalByteElementsAccessor
+//     - ExternalUnsignedByteElementsAccessor
+//     - ExternalShortElementsAccessor
+//     - ExternalUnsignedShortElementsAccessor
+//     - ExternalIntElementsAccessor
+//     - ExternalUnsignedIntElementsAccessor
+//     - ExternalFloatElementsAccessor
+//     - ExternalDoubleElementsAccessor
+//     - PixelElementsAccessor
+//   - DictionaryElementsAccessor
+//   - NonStrictArgumentsElementsAccessor
+
+
 namespace v8 {
 namespace internal {
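
The hierarchy comment above describes a curiously recurring template (CRTP) design: the virtual methods on ElementsAccessorBase forward to statically bound *Impl methods on the concrete subclass, which is why the rest of this diff renames Get to GetImpl, GetCapacity to GetCapacityImpl, and so on, to keep the two layers apart. A toy sketch of the dispatch shape (illustrative names):

    #include <cstdio>

    // Virtual entry point in the base forwards to a statically bound Impl
    // on the concrete subclass; shared defaults live in the base template.
    template <typename Subclass>
    class AccessorBase {
     public:
      virtual ~AccessorBase() {}
      virtual int Get(int key) { return Subclass::GetImpl(key); }
      static int GetImpl(int key) { return key; }  // shared default
    };

    class DoublingAccessor : public AccessorBase<DoublingAccessor> {
     public:
      static int GetImpl(int key) { return 2 * key; }  // specialization
    };

    int main() {
      DoublingAccessor accessor;
      AccessorBase<DoublingAccessor>* base = &accessor;
      printf("%d\n", base->Get(21));  // 42: base dispatches to the subclass
      return 0;
    }
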
 
@@ -38,7 +62,7 @@
 ElementsAccessor** ElementsAccessor::elements_accessors_;
 
 
-bool HasKey(FixedArray* array, Object* key) {
+static bool HasKey(FixedArray* array, Object* key) {
   int len0 = array->length();
   for (int i = 0; i < len0; i++) {
     Object* element = array->get(i);
@@ -52,6 +76,14 @@
 }
 
 
+static Failure* ThrowArrayLengthRangeError(Heap* heap) {
+  HandleScope scope(heap->isolate());
+  return heap->isolate()->Throw(
+      *heap->isolate()->factory()->NewRangeError("invalid_array_length",
+          HandleVector<Object>(NULL, 0)));
+}
+
+
 // Base class for element handler implementations. Contains the
 // common logic for objects with different ElementsKinds.
 // Subclasses must specialize methods for which the element
@@ -77,20 +109,30 @@
                            uint32_t key,
                            JSObject* obj,
                            Object* receiver) {
-    return ElementsAccessorSubclass::Get(
+    return ElementsAccessorSubclass::GetImpl(
         BackingStoreClass::cast(backing_store), key, obj, receiver);
   }
 
-  static MaybeObject* Get(BackingStoreClass* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    if (key < ElementsAccessorSubclass::GetCapacity(backing_store)) {
-      return backing_store->get(key);
-    }
-    return backing_store->GetHeap()->the_hole_value();
+  static MaybeObject* GetImpl(BackingStoreClass* backing_store,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
+    return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
+           ? backing_store->get(key)
+           : backing_store->GetHeap()->the_hole_value();
   }
 
+  virtual MaybeObject* SetLength(JSObject* obj,
+                                 Object* length) {
+    ASSERT(obj->IsJSArray());
+    return ElementsAccessorSubclass::SetLengthImpl(
+        BackingStoreClass::cast(obj->elements()), obj, length);
+  }
+
+  static MaybeObject* SetLengthImpl(BackingStoreClass* backing_store,
+                                    JSObject* obj,
+                                    Object* length);
+
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
@@ -108,7 +150,7 @@
     }
 #endif
     BackingStoreClass* backing_store = BackingStoreClass::cast(from);
-    uint32_t len1 = ElementsAccessorSubclass::GetCapacity(backing_store);
+    uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
 
     // Optimize if 'other' is empty.
     // We cannot optimize if 'this' is empty, as other may have holes.
@@ -117,14 +159,13 @@
     // Compute how many elements are not in other.
     int extra = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
-                                                      y,
-                                                      holder,
-                                                      receiver)) {
+      if (ElementsAccessorSubclass::HasElementAtIndexImpl(
+          backing_store, y, holder, receiver)) {
         uint32_t key =
-            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+            ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+            ElementsAccessorSubclass::GetImpl(backing_store, key,
+                                              holder, receiver);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         ASSERT(!value->IsTheHole());
@@ -155,14 +196,13 @@
     // Fill in the extra values.
     int index = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
-                                                      y,
-                                                      holder,
-                                                      receiver)) {
+      if (ElementsAccessorSubclass::HasElementAtIndexImpl(
+          backing_store, y, holder, receiver)) {
         uint32_t key =
-            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+            ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+            ElementsAccessorSubclass::GetImpl(backing_store, key,
+                                              holder, receiver);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         if (!value->IsTheHole() && !HasKey(to, value)) {
@@ -176,25 +216,23 @@
   }
 
  protected:
-  static uint32_t GetCapacity(BackingStoreClass* backing_store) {
+  static uint32_t GetCapacityImpl(BackingStoreClass* backing_store) {
     return backing_store->length();
   }
 
   virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
-    return ElementsAccessorSubclass::GetCapacity(
+    return ElementsAccessorSubclass::GetCapacityImpl(
         BackingStoreClass::cast(backing_store));
   }
 
-  static bool HasElementAtIndex(BackingStoreClass* backing_store,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
+  static bool HasElementAtIndexImpl(BackingStoreClass* backing_store,
+                                    uint32_t index,
+                                    JSObject* holder,
+                                    Object* receiver) {
     uint32_t key =
-        ElementsAccessorSubclass::GetKeyForIndex(backing_store, index);
-    MaybeObject* element = ElementsAccessorSubclass::Get(backing_store,
-                                                         key,
-                                                         holder,
-                                                         receiver);
+        ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
+    MaybeObject* element =
+        ElementsAccessorSubclass::GetImpl(backing_store, key, holder, receiver);
     return !element->IsTheHole();
   }
 
@@ -202,18 +240,18 @@
                                  uint32_t index,
                                  JSObject* holder,
                                  Object* receiver) {
-    return ElementsAccessorSubclass::HasElementAtIndex(
+    return ElementsAccessorSubclass::HasElementAtIndexImpl(
         BackingStoreClass::cast(backing_store), index, holder, receiver);
   }
 
-  static uint32_t GetKeyForIndex(BackingStoreClass* backing_store,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(BackingStoreClass* backing_store,
+                                     uint32_t index) {
     return index;
   }
 
   virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
                                               uint32_t index) {
-    return ElementsAccessorSubclass::GetKeyForIndex(
+    return ElementsAccessorSubclass::GetKeyForIndexImpl(
         BackingStoreClass::cast(backing_store), index);
   }
 
@@ -222,12 +260,76 @@
 };
 
 
+// Super class for all fast element arrays.
+template<typename FastElementsAccessorSubclass,
+         typename BackingStore,
+         int ElementSize>
 class FastElementsAccessor
-    : public ElementsAccessorBase<FastElementsAccessor, FixedArray> {
+    : public ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore> {
+ protected:
+  friend class ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore>;
+
+  // Adjusts the length of the fast backing store and returns the new length,
+  // or returns undefined in case conversion to a slow backing store should
+  // be performed.
+  static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
+                                                JSArray* array,
+                                                Object* length_object,
+                                                uint32_t length) {
+    uint32_t old_capacity = backing_store->length();
+
+    // Check whether the backing store should be shrunk.
+    if (length <= old_capacity) {
+      if (array->HasFastTypeElements()) {
+        MaybeObject* maybe_obj = array->EnsureWritableFastElements();
+        if (!maybe_obj->To(&backing_store)) return maybe_obj;
+      }
+      if (2 * length <= old_capacity) {
+        // If more than half the elements won't be used, trim the array.
+        if (length == 0) {
+          array->initialize_elements();
+        } else {
+          backing_store->set_length(length);
+          Address filler_start = backing_store->address() +
+              BackingStore::OffsetOfElementAt(length);
+          int filler_size = (old_capacity - length) * ElementSize;
+          array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+        }
+      } else {
+        // Otherwise, fill the unused tail with holes.
+        int old_length = FastD2I(array->length()->Number());
+        for (int i = length; i < old_length; i++) {
+          backing_store->set_the_hole(i);
+        }
+      }
+      return length_object;
+    }
+
+    // Check whether the backing store should be expanded.
+    uint32_t min = JSObject::NewElementsCapacity(old_capacity);
+    uint32_t new_capacity = length > min ? length : min;
+    if (!array->ShouldConvertToSlowElements(new_capacity)) {
+      MaybeObject* result = FastElementsAccessorSubclass::
+          SetFastElementsCapacityAndLength(array, new_capacity, length);
+      if (result->IsFailure()) return result;
+      return length_object;
+    }
+
+    // Request conversion to slow elements.
+    return array->GetHeap()->undefined_value();
+  }
+};
+
+
+class FastObjectElementsAccessor
+    : public FastElementsAccessor<FastObjectElementsAccessor,
+                                  FixedArray,
+                                  kPointerSize> {
  public:
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key) {
-    ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
+    ASSERT(obj->HasFastElements() ||
+           obj->HasFastSmiOnlyElements() ||
+           obj->HasFastArgumentsElements());
     Heap* heap = obj->GetHeap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
     if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
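
SetLengthWithoutNormalize above encodes a three-way resize policy: trim the backing store when at most half of it would stay in use, hole out the tail when shrinking less aggressively, and grow to the larger of the requested length and the next capacity step otherwise. A compact model of that decision; the growth formula below is an assumption for the demo:

    #include <algorithm>
    #include <cstdio>

    unsigned NewCapacityDemo(unsigned old_capacity) {
      return old_capacity + (old_capacity >> 1) + 16;  // assumed growth step
    }

    // Returns the resulting backing-store capacity for a new length.
    unsigned ResizePolicy(unsigned old_capacity, unsigned new_length) {
      if (new_length <= old_capacity) {
        if (2 * new_length <= old_capacity) return new_length;  // trim
        return old_capacity;  // keep capacity, fill the tail with holes
      }
      return std::max(new_length, NewCapacityDemo(old_capacity));  // expand
    }

    int main() {
      printf("%u\n", ResizePolicy(100, 10));   // 10: trimmed
      printf("%u\n", ResizePolicy(100, 80));   // 100: tail holed out
      printf("%u\n", ResizePolicy(100, 500));  // 500: grown to fit
      return 0;
    }
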
@@ -270,6 +372,22 @@
   }
 
  protected:
+  friend class FastElementsAccessor<FastObjectElementsAccessor,
+                                    FixedArray,
+                                    kPointerSize>;
+
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       uint32_t capacity,
+                                                       uint32_t length) {
+    JSObject::SetFastElementsCapacityMode set_capacity_mode =
+        obj->HasFastSmiOnlyElements()
+            ? JSObject::kAllowSmiOnlyElements
+            : JSObject::kDontAllowSmiOnlyElements;
+    return obj->SetFastElementsCapacityAndLength(capacity,
+                                                 length,
+                                                 set_capacity_mode);
+  }
+
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
@@ -279,11 +397,21 @@
 
 
 class FastDoubleElementsAccessor
-    : public ElementsAccessorBase<FastDoubleElementsAccessor,
-                                  FixedDoubleArray> {
+    : public FastElementsAccessor<FastDoubleElementsAccessor,
+                                  FixedDoubleArray,
+                                  kDoubleSize> {
  protected:
   friend class ElementsAccessorBase<FastDoubleElementsAccessor,
                                     FixedDoubleArray>;
+  friend class FastElementsAccessor<FastDoubleElementsAccessor,
+                                    FixedDoubleArray,
+                                    kDoubleSize>;
+
+  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+                                                       uint32_t capacity,
+                                                       uint32_t length) {
+    return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+  }
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
@@ -297,10 +425,10 @@
     return obj->GetHeap()->true_value();
   }
 
-  static bool HasElementAtIndex(FixedDoubleArray* backing_store,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
+  static bool HasElementAtIndexImpl(FixedDoubleArray* backing_store,
+                                    uint32_t index,
+                                    JSObject* holder,
+                                    Object* receiver) {
     return !backing_store->is_the_hole(index);
   }
 };
@@ -316,15 +444,22 @@
   friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
                                     ExternalArray>;
 
-  static MaybeObject* Get(ExternalArray* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    if (key < ExternalElementsAccessorSubclass::GetCapacity(backing_store)) {
-      return backing_store->get(key);
-    } else {
-      return backing_store->GetHeap()->undefined_value();
-    }
+  static MaybeObject* GetImpl(ExternalArray* backing_store,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
+    return
+        key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+        ? backing_store->get(key)
+        : backing_store->GetHeap()->undefined_value();
+  }
+
+  static MaybeObject* SetLengthImpl(ExternalArray* backing_store,
+                                    JSObject* obj,
+                                    Object* length) {
+    // External arrays do not support changing their length.
+    UNREACHABLE();
+    return obj;
   }
 
   virtual MaybeObject* Delete(JSObject* obj,
@@ -392,8 +527,65 @@
 
 class DictionaryElementsAccessor
     : public ElementsAccessorBase<DictionaryElementsAccessor,
-                                  SeededNumberDictionary> {
+                                  NumberDictionary> {
  public:
+  // Adjusts the length of the dictionary backing store and returns the new
+  // length according to ES5 section 15.4.5.2 behavior.
+  static MaybeObject* SetLengthWithoutNormalize(NumberDictionary* dict,
+                                                JSArray* array,
+                                                Object* length_object,
+                                                uint32_t length) {
+    if (length == 0) {
+      // If the length of a slow array is reset to zero, we clear
+      // the array and flush backing storage. This has the added
+      // benefit that the array returns to fast mode.
+      Object* obj;
+      MaybeObject* maybe_obj = array->ResetElements();
+      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    } else {
+      uint32_t new_length = length;
+      uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
+      if (new_length < old_length) {
+        // Find last non-deletable element in range of elements to be
+        // deleted and adjust range accordingly.
+        Heap* heap = array->GetHeap();
+        int capacity = dict->Capacity();
+        for (int i = 0; i < capacity; i++) {
+          Object* key = dict->KeyAt(i);
+          if (key->IsNumber()) {
+            uint32_t number = static_cast<uint32_t>(key->Number());
+            if (new_length <= number && number < old_length) {
+              PropertyDetails details = dict->DetailsAt(i);
+              if (details.IsDontDelete()) new_length = number + 1;
+            }
+          }
+        }
+        if (new_length != length) {
+          MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
+          if (!maybe_object->To(&length_object)) return maybe_object;
+        }
+
+        // Remove elements that should be deleted.
+        int removed_entries = 0;
+        Object* the_hole_value = heap->the_hole_value();
+        for (int i = 0; i < capacity; i++) {
+          Object* key = dict->KeyAt(i);
+          if (key->IsNumber()) {
+            uint32_t number = static_cast<uint32_t>(key->Number());
+            if (new_length <= number && number < old_length) {
+              dict->SetEntry(i, the_hole_value, the_hole_value);
+              removed_entries++;
+            }
+          }
+        }
+
+        // Update the number of elements.
+        dict->ElementsRemoved(removed_entries);
+      }
+    }
+    return length_object;
+  }
+
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key,
                                    JSReceiver::DeleteMode mode) {
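
The dictionary SetLengthWithoutNormalize above implements the ES5 15.4.5.2 rule that shrinking an array cannot delete non-configurable elements: the first pass finds the last DontDelete element in the doomed range and clamps the new length to just past it. A simplified model of that clamping pass (not V8 code):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Entry {
      unsigned index;
      bool dont_delete;  // models the DontDelete property attribute
    };

    unsigned ClampNewLength(const std::vector<Entry>& entries,
                            unsigned old_length, unsigned new_length) {
      for (const Entry& e : entries) {
        if (new_length <= e.index && e.index < old_length && e.dont_delete) {
          new_length = std::max(new_length, e.index + 1);  // keep this one
        }
      }
      return new_length;
    }

    int main() {
      std::vector<Entry> entries = { {2, false}, {5, true}, {8, false} };
      printf("%u\n", ClampNewLength(entries, 10, 0));  // 6: index 5 survives
      return 0;
    }
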
@@ -405,10 +597,9 @@
     if (is_arguments) {
       backing_store = FixedArray::cast(backing_store->get(1));
     }
-    SeededNumberDictionary* dictionary =
-        SeededNumberDictionary::cast(backing_store);
+    NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
     int entry = dictionary->FindEntry(key);
-    if (entry != SeededNumberDictionary::kNotFound) {
+    if (entry != NumberDictionary::kNotFound) {
       Object* result = dictionary->DeleteProperty(entry, mode);
       if (result == heap->true_value()) {
         MaybeObject* maybe_elements = dictionary->Shrink(key);
@@ -441,7 +632,7 @@
 
  protected:
   friend class ElementsAccessorBase<DictionaryElementsAccessor,
-                                    SeededNumberDictionary>;
+                                    NumberDictionary>;
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
@@ -449,12 +640,12 @@
     return DeleteCommon(obj, key, mode);
   }
 
-  static MaybeObject* Get(SeededNumberDictionary* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
+  static MaybeObject* GetImpl(NumberDictionary* backing_store,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
     int entry = backing_store->FindEntry(key);
-    if (entry != SeededNumberDictionary::kNotFound) {
+    if (entry != NumberDictionary::kNotFound) {
       Object* element = backing_store->ValueAt(entry);
       PropertyDetails details = backing_store->DetailsAt(entry);
       if (details.type() == CALLBACKS) {
@@ -469,8 +660,8 @@
     return obj->GetHeap()->the_hole_value();
   }
 
-  static uint32_t GetKeyForIndex(SeededNumberDictionary* dict,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(NumberDictionary* dict,
+                                     uint32_t index) {
     Object* key = dict->KeyAt(index);
     return Smi::cast(key)->value();
   }
@@ -484,10 +675,10 @@
   friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
                                     FixedArray>;
 
-  static MaybeObject* Get(FixedArray* parameter_map,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
+  static MaybeObject* GetImpl(FixedArray* parameter_map,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
     Object* probe = GetParameterMapArg(parameter_map, key);
     if (!probe->IsTheHole()) {
       Context* context = Context::cast(parameter_map->get(0));
@@ -504,9 +695,17 @@
     }
   }
 
+  static MaybeObject* SetLengthImpl(FixedArray* parameter_map,
+                                    JSObject* obj,
+                                    Object* length) {
+    // TODO(mstarzinger): This was never implemented but will be used once we
+    // correctly implement [[DefineOwnProperty]] on arrays.
+    UNIMPLEMENTED();
+    return obj;
+  }
+
   virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key
-                              ,
+                              uint32_t key,
                               JSReceiver::DeleteMode mode) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
     Object* probe = GetParameterMapArg(parameter_map, key);
@@ -520,27 +719,27 @@
       if (arguments->IsDictionary()) {
         return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
       } else {
-        return FastElementsAccessor::DeleteCommon(obj, key);
+        return FastObjectElementsAccessor::DeleteCommon(obj, key);
       }
     }
     return obj->GetHeap()->true_value();
   }
 
-  static uint32_t GetCapacity(FixedArray* parameter_map) {
+  static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
     return Max(static_cast<uint32_t>(parameter_map->length() - 2),
                ForArray(arguments)->GetCapacity(arguments));
   }
 
-  static uint32_t GetKeyForIndex(FixedArray* dict,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(FixedArray* dict,
+                                     uint32_t index) {
     return index;
   }
 
-  static bool HasElementAtIndex(FixedArray* parameter_map,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
+  static bool HasElementAtIndexImpl(FixedArray* parameter_map,
+                                    uint32_t index,
+                                    JSObject* holder,
+                                    Object* receiver) {
     Object* probe = GetParameterMapArg(parameter_map, index);
     if (!probe->IsTheHole()) {
       return true;
@@ -596,40 +795,108 @@
 
 
 void ElementsAccessor::InitializeOncePerProcess() {
-  static struct ConcreteElementsAccessors {
-    FastElementsAccessor fast_elements_handler;
-    FastDoubleElementsAccessor fast_double_elements_handler;
-    DictionaryElementsAccessor dictionary_elements_handler;
-    NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
-    ExternalByteElementsAccessor byte_elements_handler;
-    ExternalUnsignedByteElementsAccessor unsigned_byte_elements_handler;
-    ExternalShortElementsAccessor short_elements_handler;
-    ExternalUnsignedShortElementsAccessor unsigned_short_elements_handler;
-    ExternalIntElementsAccessor int_elements_handler;
-    ExternalUnsignedIntElementsAccessor unsigned_int_elements_handler;
-    ExternalFloatElementsAccessor float_elements_handler;
-    ExternalDoubleElementsAccessor double_elements_handler;
-    PixelElementsAccessor pixel_elements_handler;
-  } element_accessors;
+  // The first argument in the list is the accessor class, the second argument
+  // can be an arbitrary unique identifier, in this case chosen to be the
+  // corresponding enum.  Use the fast element handler for smi-only arrays;
+  // the implementation is currently identical.  Note that the order must match
+  // that of the ElementsKind enum for the |accessor_array[]| below to work.
+#define ELEMENTS_LIST(V)                                                       \
+  V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS)                        \
+  V(FastObjectElementsAccessor, FAST_ELEMENTS)                                 \
+  V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS)                          \
+  V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS)                           \
+  V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS)         \
+  V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS)                      \
+  V(ExternalUnsignedByteElementsAccessor, EXTERNAL_UNSIGNED_BYTE_ELEMENTS)     \
+  V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS)                    \
+  V(ExternalUnsignedShortElementsAccessor, EXTERNAL_UNSIGNED_SHORT_ELEMENTS)   \
+  V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS)                        \
+  V(ExternalUnsignedIntElementsAccessor, EXTERNAL_UNSIGNED_INT_ELEMENTS)       \
+  V(ExternalFloatElementsAccessor, EXTERNAL_FLOAT_ELEMENTS)                    \
+  V(ExternalDoubleElementsAccessor, EXTERNAL_DOUBLE_ELEMENTS)                  \
+  V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS)
 
-  static ElementsAccessor* accessor_array[] = {
-    &element_accessors.fast_elements_handler,
-    &element_accessors.fast_double_elements_handler,
-    &element_accessors.dictionary_elements_handler,
-    &element_accessors.non_strict_arguments_elements_handler,
-    &element_accessors.byte_elements_handler,
-    &element_accessors.unsigned_byte_elements_handler,
-    &element_accessors.short_elements_handler,
-    &element_accessors.unsigned_short_elements_handler,
-    &element_accessors.int_elements_handler,
-    &element_accessors.unsigned_int_elements_handler,
-    &element_accessors.float_elements_handler,
-    &element_accessors.double_elements_handler,
-    &element_accessors.pixel_elements_handler
+  static struct ConcreteElementsAccessors {
+#define ACCESSOR_STRUCT(Class, Name) Class* Name##_handler;
+    ELEMENTS_LIST(ACCESSOR_STRUCT)
+#undef ACCESSOR_STRUCT
+  } element_accessors = {
+#define ACCESSOR_INIT(Class, Name) new Class(),
+    ELEMENTS_LIST(ACCESSOR_INIT)
+#undef ACCESSOR_INIT
   };
 
+  static ElementsAccessor* accessor_array[] = {
+#define ACCESSOR_ARRAY(Class, Name) element_accessors.Name##_handler,
+    ELEMENTS_LIST(ACCESSOR_ARRAY)
+#undef ACCESSOR_ARRAY
+  };
+
+#undef ELEMENTS_LIST
+
+  STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
+                kElementsKindCount);
+
   elements_accessors_ = accessor_array;
 }
 
 
+template <typename ElementsAccessorSubclass, typename BackingStoreClass>
+MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
+    SetLengthImpl(BackingStoreClass* backing_store,
+                  JSObject* obj,
+                  Object* length) {
+  JSArray* array = JSArray::cast(obj);
+
+  // Fast case: The new length fits into a Smi.
+  MaybeObject* maybe_smi_length = length->ToSmi();
+  Object* smi_length = Smi::FromInt(0);
+  if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
+    const int value = Smi::cast(smi_length)->value();
+    if (value >= 0) {
+      Object* new_length;
+      MaybeObject* result = ElementsAccessorSubclass::
+          SetLengthWithoutNormalize(backing_store, array, smi_length, value);
+      if (!result->ToObject(&new_length)) return result;
+      ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+      if (new_length->IsSmi()) {
+        array->set_length(Smi::cast(new_length));
+        return array;
+      }
+    } else {
+      return ThrowArrayLengthRangeError(array->GetHeap());
+    }
+  }
+
+  // Slow case: The new length does not fit into a Smi, or conversion
+  // to slow elements is needed for other reasons.
+  if (length->IsNumber()) {
+    uint32_t value;
+    if (length->ToArrayIndex(&value)) {
+      NumberDictionary* dictionary;
+      MaybeObject* maybe_object = array->NormalizeElements();
+      if (!maybe_object->To(&dictionary)) return maybe_object;
+      Object* new_length;
+      MaybeObject* result = DictionaryElementsAccessor::
+          SetLengthWithoutNormalize(dictionary, array, length, value);
+      if (!result->ToObject(&new_length)) return result;
+      ASSERT(new_length->IsNumber());
+      array->set_length(new_length);
+      return array;
+    } else {
+      return ThrowArrayLengthRangeError(array->GetHeap());
+    }
+  }
+
+  // Fall-back case: The new length is not a number, so make the array
+  // size one and set its only element to the length value.
+  FixedArray* new_backing_store;
+  MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
+  if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
+  new_backing_store->set(0, length);
+  array->SetContent(new_backing_store);
+  return array;
+}
+
+
 } }  // namespace v8::internal
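
ELEMENTS_LIST above is an X-macro: a single list definition is expanded three times, once into struct members, once into their initializers, and once into the lookup array, so the accessor set can never fall out of step with itself, and the STATIC_ASSERT pins the array length to kElementsKindCount. A self-contained sketch of the expansion trick (illustrative names):

    #include <cstdio>

    #define HANDLER_LIST(V) \
      V(FastHandler, fast)  \
      V(SlowHandler, slow)

    struct FastHandler { const char* name() const { return "fast"; } };
    struct SlowHandler { const char* name() const { return "slow"; } };

    // Expansion 1: one struct member per list entry.
    static struct Handlers {
    #define ACCESSOR_STRUCT(Class, tag) Class tag##_handler;
      HANDLER_LIST(ACCESSOR_STRUCT)
    #undef ACCESSOR_STRUCT
    } handlers;

    int main() {
      // Expansion 2: an array whose order matches the list by construction.
      const char* names[] = {
    #define ACCESSOR_ARRAY(Class, tag) handlers.tag##_handler.name(),
        HANDLER_LIST(ACCESSOR_ARRAY)
    #undef ACCESSOR_ARRAY
      };
      printf("%s %s\n", names[0], names[1]);  // fast slow
      return 0;
    }
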
diff --git a/src/elements.h b/src/elements.h
index 851c8c3..ed1ca5e 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -44,6 +44,11 @@
                            JSObject* holder,
                            Object* receiver) = 0;
 
+  // Modifies the length data property as specified for JSArrays and resizes
+  // the underlying backing store accordingly.
+  virtual MaybeObject* SetLength(JSObject* holder,
+                                 Object* new_length) = 0;
+
   virtual MaybeObject* Delete(JSObject* holder,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
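
The new SetLength hook centralizes what assigning to array.length must observably do: a valid array index shrinks or grows the element store, while a negative or non-integral number raises a RangeError (ThrowArrayLengthRangeError in the .cc above). A simplified model of the length check and resize (not V8 code):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Clamp-and-resize model of JSArray length assignment (ES5 15.4).
    void SetArrayLength(std::vector<double>& elements, double new_length) {
      if (new_length < 0 || new_length > 4294967295.0 ||
          new_length != static_cast<uint32_t>(new_length)) {
        throw std::range_error("invalid_array_length");  // RangeError case
      }
      // Shrinking drops trailing elements; growing appends "holes" (0.0 here).
      elements.resize(static_cast<uint32_t>(new_length));
    }

    int main() {
      std::vector<double> elements(10, 1.0);
      SetArrayLength(elements, 3);
      return elements.size() == 3 ? 0 : 1;
    }
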
diff --git a/src/execution.cc b/src/execution.cc
index f36d4e4..b16e739 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -33,6 +33,7 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "debug.h"
+#include "isolate-inl.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
@@ -65,13 +66,13 @@
 }
 
 
-static Handle<Object> Invoke(bool construct,
-                             Handle<JSFunction> func,
+static Handle<Object> Invoke(bool is_construct,
+                             Handle<JSFunction> function,
                              Handle<Object> receiver,
                              int argc,
-                             Object*** args,
+                             Handle<Object> args[],
                              bool* has_pending_exception) {
-  Isolate* isolate = func->GetIsolate();
+  Isolate* isolate = function->GetIsolate();
 
   // Entering JavaScript.
   VMState state(isolate, JS);
@@ -79,21 +80,15 @@
   // Placeholder for return value.
   MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
 
-  typedef Object* (*JSEntryFunction)(
-    byte* entry,
-    Object* function,
-    Object* receiver,
-    int argc,
-    Object*** args);
+  typedef Object* (*JSEntryFunction)(byte* entry,
+                                     Object* function,
+                                     Object* receiver,
+                                     int argc,
+                                     Object*** args);
 
-  Handle<Code> code;
-  if (construct) {
-    JSConstructEntryStub stub;
-    code = stub.GetCode();
-  } else {
-    JSEntryStub stub;
-    code = stub.GetCode();
-  }
+  Handle<Code> code = is_construct
+      ? isolate->factory()->js_construct_entry_code()
+      : isolate->factory()->js_entry_code();
 
   // Convert calls on global objects to be calls on the global
   // receiver instead to avoid having a 'this' pointer which refers
@@ -105,21 +100,22 @@
 
   // Make sure that the global object of the context we're about to
   // make the current one is indeed a global object.
-  ASSERT(func->context()->global()->IsGlobalObject());
+  ASSERT(function->context()->global()->IsGlobalObject());
 
   {
     // Save and restore context around invocation and block the
     // allocation of handles without explicit handle scopes.
     SaveContext save(isolate);
     NoHandleAllocation na;
-    JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+    JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    byte* entry_address = func->code()->entry();
-    JSFunction* function = *func;
-    Object* receiver_pointer = *receiver;
-    value = CALL_GENERATED_CODE(entry, entry_address, function,
-                                receiver_pointer, argc, args);
+    byte* function_entry = function->code()->entry();
+    JSFunction* func = *function;
+    Object* recv = *receiver;
+    Object*** argv = reinterpret_cast<Object***>(args);
+    value =
+        CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
   }
 
 #ifdef DEBUG
@@ -148,9 +144,11 @@
 Handle<Object> Execution::Call(Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
-                               Object*** args,
+                               Handle<Object> argv[],
                                bool* pending_exception,
                                bool convert_receiver) {
+  *pending_exception = false;
+
   if (!callable->IsJSFunction()) {
     callable = TryGetFunctionDelegate(callable, pending_exception);
     if (*pending_exception) return callable;
@@ -159,7 +157,7 @@
 
   // In non-strict mode, convert receiver.
   if (convert_receiver && !receiver->IsJSReceiver() &&
-      !func->shared()->native() && !func->shared()->strict_mode()) {
+      !func->shared()->native() && func->shared()->is_classic_mode()) {
     if (receiver->IsUndefined() || receiver->IsNull()) {
       Object* global = func->context()->global()->global_receiver();
       // Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -172,13 +170,15 @@
     if (*pending_exception) return callable;
   }
 
-  return Invoke(false, func, receiver, argc, args, pending_exception);
+  return Invoke(false, func, receiver, argc, argv, pending_exception);
 }
 
 
-Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
-                              Object*** args, bool* pending_exception) {
-  return Invoke(true, func, Isolate::Current()->global(), argc, args,
+Handle<Object> Execution::New(Handle<JSFunction> func,
+                              int argc,
+                              Handle<Object> argv[],
+                              bool* pending_exception) {
+  return Invoke(true, func, Isolate::Current()->global(), argc, argv,
                 pending_exception);
 }
 
@@ -186,7 +186,7 @@
 Handle<Object> Execution::TryCall(Handle<JSFunction> func,
                                   Handle<Object> receiver,
                                   int argc,
-                                  Object*** args,
+                                  Handle<Object> args[],
                                   bool* caught_exception) {
   // Enter a try-block while executing the JavaScript code. To avoid
   // duplicate error printing it must be non-verbose.  Also, to avoid
@@ -195,6 +195,7 @@
   v8::TryCatch catcher;
   catcher.SetVerbose(false);
   catcher.SetCaptureMessage(false);
+  *caught_exception = false;
 
   Handle<Object> result = Invoke(false, func, receiver, argc, args,
                                  caught_exception);
@@ -377,7 +378,7 @@
 
 bool StackGuard::IsInterrupted() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & INTERRUPT;
+  return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
 }
 
 
@@ -403,7 +404,7 @@
 
 bool StackGuard::IsTerminateExecution() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & TERMINATE;
+  return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
 }
 
 
@@ -416,7 +417,7 @@
 
 bool StackGuard::IsRuntimeProfilerTick() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
+  return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
 }
 
 
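The three predicates above change from returning the raw masked integer to comparing it against zero. The behavior is identical, but the implicit int-to-bool narrowing is something compilers such as MSVC warn about (C4800), and the comparison states the intent explicitly. A minimal illustration, with flag values mirroring the ones in this file:

    enum InterruptFlag {
      INTERRUPT = 1 << 0,
      TERMINATE = 1 << 4,
      RUNTIME_PROFILER_TICK = 1 << 5
    };

    // 'return flags & TERMINATE;' would convert 16 to true implicitly;
    // the explicit comparison yields a genuine bool with no warning.
    bool IsSet(int flags, InterruptFlag flag) {
      return (flags & flag) != 0;
    }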
@@ -433,6 +434,22 @@
 }
 
 
+bool StackGuard::IsGCRequest() {
+  ExecutionAccess access(isolate_);
+  return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
+}
+
+
+void StackGuard::RequestGC() {
+  ExecutionAccess access(isolate_);
+  thread_local_.interrupt_flags_ |= GC_REQUEST;
+  if (thread_local_.postpone_interrupts_nesting_ == 0) {
+    thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+    isolate_->heap()->SetStackLimits();
+  }
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access(isolate_);
@@ -555,14 +572,15 @@
 
 // --- C a l l s   t o   n a t i v e s ---
 
-#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception)            \
-  do {                                                                         \
-    Isolate* isolate = Isolate::Current();                                     \
-    Object** args[argc] = argv;                                                \
-    ASSERT(has_pending_exception != NULL);                                     \
-    return Call(isolate->name##_fun(),                                         \
-                isolate->js_builtins_object(), argc, args,                     \
-                has_pending_exception);                                        \
+#define RETURN_NATIVE_CALL(name, args, has_pending_exception)           \
+  do {                                                                  \
+    Isolate* isolate = Isolate::Current();                              \
+    Handle<Object> argv[] = args;                                       \
+    ASSERT(has_pending_exception != NULL);                              \
+    return Call(isolate->name##_fun(),                                  \
+                isolate->js_builtins_object(),                          \
+                ARRAY_SIZE(argv), argv,                                 \
+                has_pending_exception);                                 \
   } while (false)
 
 
@@ -583,44 +601,44 @@
 
 
 Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_number, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_string, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
   if (obj->IsSpecObject()) return obj;
-  RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_object, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_integer, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
 }
 
 
 Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
+  RETURN_NATIVE_CALL(to_int32, { obj }, exc);
 }
 
 
 Handle<Object> Execution::NewDate(double time, bool* exc) {
   Handle<Object> time_obj = FACTORY->NewNumber(time);
-  RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
+  RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
 }
 
 
@@ -657,7 +675,7 @@
 
   bool caught_exception;
   Handle<Object> index_object = factory->NewNumberFromInt(int_index);
-  Object** index_arg[] = { index_object.location() };
+  Handle<Object> index_arg[] = { index_object };
   Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
                                   string,
                                   ARRAY_SIZE(index_arg),
@@ -671,7 +689,8 @@
 
 
 Handle<JSFunction> Execution::InstantiateFunction(
-    Handle<FunctionTemplateInfo> data, bool* exc) {
+    Handle<FunctionTemplateInfo> data,
+    bool* exc) {
   Isolate* isolate = data->GetIsolate();
   // Fast case: see if the function has already been instantiated
   int serial_number = Smi::cast(data->serial_number())->value();
@@ -680,10 +699,12 @@
           GetElementNoExceptionThrown(serial_number);
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
-  Object** args[1] = { Handle<Object>::cast(data).location() };
-  Handle<Object> result =
-      Call(isolate->instantiate_fun(),
-           isolate->js_builtins_object(), 1, args, exc);
+  Handle<Object> args[] = { data };
+  Handle<Object> result = Call(isolate->instantiate_fun(),
+                               isolate->js_builtins_object(),
+                               ARRAY_SIZE(args),
+                               args,
+                               exc);
   if (*exc) return Handle<JSFunction>::null();
   return Handle<JSFunction>::cast(result);
 }
@@ -710,10 +731,12 @@
     ASSERT(!*exc);
     return Handle<JSObject>(JSObject::cast(result));
   } else {
-    Object** args[1] = { Handle<Object>::cast(data).location() };
-    Handle<Object> result =
-        Call(isolate->instantiate_fun(),
-             isolate->js_builtins_object(), 1, args, exc);
+    Handle<Object> args[] = { data };
+    Handle<Object> result = Call(isolate->instantiate_fun(),
+                                 isolate->js_builtins_object(),
+                                 ARRAY_SIZE(args),
+                                 args,
+                                 exc);
     if (*exc) return Handle<JSObject>::null();
     return Handle<JSObject>::cast(result);
   }
@@ -724,9 +747,12 @@
                                   Handle<Object> instance_template,
                                   bool* exc) {
   Isolate* isolate = Isolate::Current();
-  Object** args[2] = { instance.location(), instance_template.location() };
+  Handle<Object> args[] = { instance, instance_template };
   Execution::Call(isolate->configure_instance_fun(),
-                  isolate->js_builtins_object(), 2, args, exc);
+                  isolate->js_builtins_object(),
+                  ARRAY_SIZE(args),
+                  args,
+                  exc);
 }
 
 
@@ -735,16 +761,13 @@
                                             Handle<Object> pos,
                                             Handle<Object> is_global) {
   Isolate* isolate = fun->GetIsolate();
-  const int argc = 4;
-  Object** args[argc] = { recv.location(),
-                          Handle<Object>::cast(fun).location(),
-                          pos.location(),
-                          is_global.location() };
-  bool caught_exception = false;
-  Handle<Object> result =
-      TryCall(isolate->get_stack_trace_line_fun(),
-              isolate->js_builtins_object(), argc, args,
-              &caught_exception);
+  Handle<Object> args[] = { recv, fun, pos, is_global };
+  bool caught_exception;
+  Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
+                                  isolate->js_builtins_object(),
+                                  ARRAY_SIZE(args),
+                                  args,
+                                  &caught_exception);
   if (caught_exception || !result->IsString()) {
       return isolate->factory()->empty_symbol();
   }
@@ -852,6 +875,12 @@
 MaybeObject* Execution::HandleStackGuardInterrupt() {
   Isolate* isolate = Isolate::Current();
   StackGuard* stack_guard = isolate->stack_guard();
+
+  if (stack_guard->IsGCRequest()) {
+    isolate->heap()->CollectAllGarbage(false);
+    stack_guard->Continue(GC_REQUEST);
+  }
+
   isolate->counters()->stack_interrupts()->Increment();
   if (stack_guard->IsRuntimeProfilerTick()) {
     isolate->counters()->runtime_profiler_ticks()->Increment();
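The new RequestGC/IsGCRequest pair and the GC_REQUEST branch in HandleStackGuardInterrupt form a cooperative trigger: the requesting side sets a flag and, unless interrupts are postponed, clamps the stack limit so the next stack check in running code traps into the runtime, which performs the collection and restores the limits via Continue(). A self-contained toy of the mechanism (ToyStackGuard is illustrative; the real limit handling lives in StackGuard and uses kInterruptLimit):

    #include <stdint.h>

    struct ToyStackGuard {
      enum { GC_REQUEST = 1 << 6 };

      uintptr_t real_limit;  // the genuine stack limit
      uintptr_t jslimit;     // the limit generated code checks against
      int interrupt_flags;

      void RequestGC() {
        interrupt_flags |= GC_REQUEST;
        jslimit = ~static_cast<uintptr_t>(0);  // next stack check "fails"
      }

      bool IsGCRequest() const {
        return (interrupt_flags & GC_REQUEST) != 0;
      }

      // Called from the interrupt handler once the GC has run.
      void Continue() {
        interrupt_flags &= ~GC_REQUEST;
        jslimit = real_limit;
      }
    };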
diff --git a/src/execution.h b/src/execution.h
index 5cd7141..f2d17d0 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,8 @@
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  RUNTIME_PROFILER_TICK = 1 << 5
+  RUNTIME_PROFILER_TICK = 1 << 5,
+  GC_REQUEST = 1 << 6
 };
 
 class Execution : public AllStatic {
@@ -60,7 +61,7 @@
   static Handle<Object> Call(Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
-                             Object*** args,
+                             Handle<Object> argv[],
                              bool* pending_exception,
                              bool convert_receiver = false);
 
@@ -73,7 +74,7 @@
   //
   static Handle<Object> New(Handle<JSFunction> func,
                             int argc,
-                            Object*** args,
+                            Handle<Object> argv[],
                             bool* pending_exception);
 
   // Call a function, just like Call(), but make sure to silently catch
@@ -83,7 +84,7 @@
   static Handle<Object> TryCall(Handle<JSFunction> func,
                                 Handle<Object> receiver,
                                 int argc,
-                                Object*** args,
+                                Handle<Object> argv[],
                                 bool* caught_exception);
 
   // ECMA-262 9.2
@@ -196,6 +197,8 @@
   bool IsDebugCommand();
   void DebugCommand();
 #endif
+  bool IsGCRequest();
+  void RequestGC();
   void Continue(InterruptFlag after_what);
 
   // This provides an asynchronous read of the stack limits for the current
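The Object*** to Handle<Object> argv[] change recurs at every call site in this patch: arguments become an on-stack array of handles and the count comes from ARRAY_SIZE instead of a hand-maintained constant, which removes a class of argc/argv mismatches. A representative call in the new style (fun, key_handle, and value_handle are assumed handles already in scope; this mirrors the rewritten sites rather than adding any new API):

    bool pending_exception;
    Handle<Object> argv[] = { key_handle, value_handle };
    Handle<Object> result = Execution::Call(fun,
                                            isolate->js_builtins_object(),
                                            ARRAY_SIZE(argv),
                                            argv,
                                            &pending_exception);
    if (pending_exception) {
      // An exception is pending on the isolate; bail out.
    }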
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 3740c27..54c8cdc 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -40,19 +40,15 @@
 
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  bool compact = false;
-  // All allocation spaces other than NEW_SPACE have the same effect.
-  if (args.Length() >= 1 && args[0]->IsBoolean()) {
-    compact = args[0]->BooleanValue();
-  }
-  HEAP->CollectAllGarbage(compact);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
   return v8::Undefined();
 }
 
 
 void GCExtension::Register() {
-  static GCExtension gc_extension;
-  static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+  static GCExtension* gc_extension = NULL;
+  if (gc_extension == NULL) gc_extension = new GCExtension();
+  static v8::DeclareExtension gc_extension_declaration(gc_extension);
 }
 
 } }  // namespace v8::internal
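The Register() rewrite above replaces a function-local static object with a once-allocated, intentionally leaked heap object: a static of class type registers an exit-time destructor, while a static pointer does not. The general shape of the pattern (Extension here is a toy type for illustration):

    #include <cstddef>

    struct Extension {
      // ... state whose destructor we do not want to run at exit ...
    };

    void RegisterOnce() {
      // A plain pointer has no destructor, so nothing is queued with
      // atexit(); the object lives until process exit on purpose.
      static Extension* instance = NULL;
      if (instance == NULL) instance = new Extension();
      // ... hand *instance to the registration machinery ...
    }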
diff --git a/src/factory.cc b/src/factory.cc
index 971f9f9..f1042a4 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -59,13 +59,13 @@
 }
 
 
-Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
-                                                PretenureFlag pretenure) {
+Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
+                                                      PretenureFlag pretenure) {
   ASSERT(0 <= size);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
-      FixedArray);
+      FixedDoubleArray);
 }
 
 
@@ -77,21 +77,19 @@
 }
 
 
-Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
-    int at_least_space_for) {
+Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
   ASSERT(0 <= at_least_space_for);
   CALL_HEAP_FUNCTION(isolate(),
-                     SeededNumberDictionary::Allocate(at_least_space_for),
-                     SeededNumberDictionary);
+                     NumberDictionary::Allocate(at_least_space_for),
+                     NumberDictionary);
 }
 
 
-Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
-    int at_least_space_for) {
+Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
   ASSERT(0 <= at_least_space_for);
   CALL_HEAP_FUNCTION(isolate(),
-                     UnseededNumberDictionary::Allocate(at_least_space_for),
-                     UnseededNumberDictionary);
+                     ObjectHashSet::Allocate(at_least_space_for),
+                     ObjectHashSet);
 }
 
 
@@ -244,7 +242,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -253,7 +251,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -305,7 +303,7 @@
 Handle<Context> Factory::NewBlockContext(
     Handle<JSFunction> function,
     Handle<Context> previous,
-    Handle<SerializedScopeInfo> scope_info) {
+    Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -414,10 +412,12 @@
 }
 
 
-Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
+Handle<Map> Factory::NewMap(InstanceType type,
+                            int instance_size,
+                            ElementsKind elements_kind) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateMap(type, instance_size),
+      isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
       Map);
 }
 
@@ -465,23 +465,11 @@
 }
 
 
-Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
-}
-
-
-Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
-}
-
-
 Handle<Map> Factory::GetElementsTransitionMap(
-    Handle<Map> src,
-    ElementsKind elements_kind,
-    bool safe_to_add_transition) {
+    Handle<JSObject> src,
+    ElementsKind elements_kind) {
   CALL_HEAP_FUNCTION(isolate(),
-                     src->GetElementsTransitionMap(elements_kind,
-                                                   safe_to_add_transition),
+                     src->GetElementsTransitionMap(elements_kind),
                      Map);
 }
 
@@ -491,6 +479,12 @@
 }
 
 
+Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
+    Handle<FixedDoubleArray> array) {
+  CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
+}
+
+
 Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
     Handle<SharedFunctionInfo> function_info,
     Handle<Map> function_map,
@@ -511,22 +505,26 @@
     PretenureFlag pretenure) {
   Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
       function_info,
-      function_info->strict_mode()
-          ? isolate()->strict_mode_function_map()
-          : isolate()->function_map(),
+      function_info->is_classic_mode()
+          ? isolate()->function_map()
+          : isolate()->strict_mode_function_map(),
       pretenure);
 
   result->set_context(*context);
-  int number_of_literals = function_info->num_literals();
-  Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
-  if (number_of_literals > 0) {
-    // Store the object, regexp and array functions in the literals
-    // array prefix.  These functions will be used when creating
-    // object, regexp and array literals in this function.
-    literals->set(JSFunction::kLiteralGlobalContextIndex,
-                  context->global_context());
+  if (!function_info->bound()) {
+    int number_of_literals = function_info->num_literals();
+    Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
+    if (number_of_literals > 0) {
+      // Store the object, regexp and array functions in the literals
+      // array prefix.  These functions will be used when creating
+      // object, regexp and array literals in this function.
+      literals->set(JSFunction::kLiteralGlobalContextIndex,
+                    context->global_context());
+    }
+    result->set_literals(*literals);
+  } else {
+    result->set_function_bindings(isolate()->heap()->empty_fixed_array());
   }
-  result->set_literals(*literals);
   result->set_next_function_link(isolate()->heap()->undefined_value());
 
   if (V8::UseCrankshaft() &&
@@ -548,17 +546,19 @@
 }
 
 
-Handle<Object> Factory::NewNumberFromInt(int value) {
+Handle<Object> Factory::NewNumberFromInt(int32_t value,
+                                         PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromInt32(value), Object);
+      isolate()->heap()->NumberFromInt32(value, pretenure), Object);
 }
 
 
-Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value,
+                                          PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromUint32(value), Object);
+      isolate()->heap()->NumberFromUint32(value, pretenure), Object);
 }
 
 
@@ -651,14 +651,16 @@
     return undefined_value();
   Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = LookupAsciiSymbol(type);
-  Object** argv[2] = { type_obj.location(),
-                       Handle<Object>::cast(args).location() };
+  Handle<Object> argv[] = { type_obj, args };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-      isolate()->js_builtins_object(), 2, argv, &caught_exception);
+                                             isolate()->js_builtins_object(),
+                                             ARRAY_SIZE(argv),
+                                             argv,
+                                             &caught_exception);
   return result;
 }
 
@@ -674,13 +676,16 @@
   Handle<JSFunction> fun = Handle<JSFunction>(
       JSFunction::cast(isolate()->js_builtins_object()->
                        GetPropertyNoExceptionThrown(*constr)));
-  Object** argv[1] = { Handle<Object>::cast(message).location() };
+  Handle<Object> argv[] = { message };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-      isolate()->js_builtins_object(), 1, argv, &caught_exception);
+                                             isolate()->js_builtins_object(),
+                                             ARRAY_SIZE(argv),
+                                             argv,
+                                             &caught_exception);
   return result;
 }
 
@@ -732,7 +737,12 @@
   if (force_initial_map ||
       type != JS_OBJECT_TYPE ||
       instance_size != JSObject::kHeaderSize) {
-    Handle<Map> initial_map = NewMap(type, instance_size);
+    ElementsKind default_elements_kind = FLAG_smi_only_arrays
+        ? FAST_SMI_ONLY_ELEMENTS
+        : FAST_ELEMENTS;
+    Handle<Map> initial_map = NewMap(type,
+                                     instance_size,
+                                     default_elements_kind);
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -749,7 +759,7 @@
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
                                                         Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
-                                                            kNonStrictMode);
+                                                            CLASSIC_MODE);
   function->shared()->set_code(*code);
   function->set_code(*code);
   ASSERT(!function->has_initial_map());
@@ -758,11 +768,11 @@
 }
 
 
-Handle<SerializedScopeInfo> Factory::NewSerializedScopeInfo(int length) {
+Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateSerializedScopeInfo(length),
-      SerializedScopeInfo);
+      isolate()->heap()->AllocateScopeInfo(length),
+      ScopeInfo);
 }
 
 
@@ -831,10 +841,13 @@
   // Number of descriptors added to the result so far.
   int descriptor_count = 0;
 
+  // Ensure that marking will not progress and change the color of objects.
+  DescriptorArray::WhitenessWitness witness(*result);
+
   // Copy the descriptors from the array.
   for (int i = 0; i < array->number_of_descriptors(); i++) {
-    if (array->GetType(i) != NULL_DESCRIPTOR) {
-      result->CopyFrom(descriptor_count++, *array, i);
+    if (!array->IsNullDescriptor(i)) {
+      result->CopyFrom(descriptor_count++, *array, i, witness);
     }
   }
 
@@ -854,7 +867,7 @@
     if (result->LinearSearch(*key, descriptor_count) ==
         DescriptorArray::kNotFound) {
       CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
-      result->Set(descriptor_count, &desc);
+      result->Set(descriptor_count, &desc, witness);
       descriptor_count++;
     } else {
       duplicates++;
@@ -868,13 +881,13 @@
     Handle<DescriptorArray> new_result =
         NewDescriptorArray(number_of_descriptors);
     for (int i = 0; i < number_of_descriptors; i++) {
-      new_result->CopyFrom(i, *result, i);
+      new_result->CopyFrom(i, *result, i, witness);
     }
     result = new_result;
   }
 
   // Sort the result before returning.
-  result->Sort();
+  result->Sort(witness);
   return result;
 }
 
@@ -918,11 +931,26 @@
   Handle<JSArray> result =
       Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
                                         pretenure));
-  result->SetContent(*elements);
+  SetContent(result, elements);
   return result;
 }
 
 
+void Factory::SetContent(Handle<JSArray> array,
+                         Handle<FixedArray> elements) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->SetContent(*elements));
+}
+
+
+void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->EnsureCanContainNonSmiElements());
+}
+
+
 Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
                                     Handle<Object> prototype) {
   CALL_HEAP_FUNCTION(
@@ -948,11 +976,18 @@
 }
 
 
+void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      object->SetIdentityHash(hash, ALLOW_CREATION));
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
     Handle<Code> code,
-    Handle<SerializedScopeInfo> scope_info) {
+    Handle<ScopeInfo> scope_info) {
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
   shared->set_code(*code);
   shared->set_scope_info(*scope_info);
@@ -1000,23 +1035,19 @@
 }
 
 
-Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
-    Handle<SeededNumberDictionary> dictionary,
-    uint32_t key,
-    Handle<Object> value) {
+Handle<String> Factory::Uint32ToString(uint32_t value) {
   CALL_HEAP_FUNCTION(isolate(),
-                     dictionary->AtNumberPut(key, *value),
-                     SeededNumberDictionary);
+                     isolate()->heap()->Uint32ToString(value), String);
 }
 
 
-Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut(
-    Handle<UnseededNumberDictionary> dictionary,
+Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
+    Handle<NumberDictionary> dictionary,
     uint32_t key,
     Handle<Object> value) {
   CALL_HEAP_FUNCTION(isolate(),
                      dictionary->AtNumberPut(key, *value),
-                     UnseededNumberDictionary);
+                     NumberDictionary);
 }
 
 
@@ -1042,11 +1073,11 @@
 
 Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
     Handle<String> name,
-    StrictModeFlag strict_mode) {
+    LanguageMode language_mode) {
   Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
-  Handle<Map> map = strict_mode == kStrictMode
-      ? isolate()->strict_mode_function_without_prototype_map()
-      : isolate()->function_without_prototype_map();
+  Handle<Map> map = (language_mode == CLASSIC_MODE)
+      ? isolate()->function_without_prototype_map()
+      : isolate()->strict_mode_function_without_prototype_map();
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->AllocateFunction(
                          *map,
@@ -1058,8 +1089,9 @@
 
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
     Handle<String> name,
-    StrictModeFlag strict_mode) {
-  Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
+    LanguageMode language_mode) {
+  Handle<JSFunction> fun =
+      NewFunctionWithoutPrototypeHelper(name, language_mode);
   fun->set_context(isolate()->context()->global_context());
   return fun;
 }
@@ -1319,4 +1351,20 @@
 }
 
 
+Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
+  Heap* h = isolate()->heap();
+  if (name->Equals(h->undefined_symbol())) return undefined_value();
+  if (name->Equals(h->nan_symbol())) return nan_value();
+  if (name->Equals(h->infinity_symbol())) return infinity_value();
+  return Handle<Object>::null();
+}
+
+
+Handle<Object> Factory::ToBoolean(bool value) {
+  return Handle<Object>(value
+                        ? isolate()->heap()->true_value()
+                        : isolate()->heap()->false_value());
+}
+
+
 } }  // namespace v8::internal
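The new Factory::SetContent and Factory::EnsureCanContainNonSmiElements wrappers route raw object methods through CALL_HEAP_FUNCTION_VOID, so callers holding handles stay safe if the underlying call allocates and the heap has to GC-and-retry. A usage sketch built from names visible in this patch (assumed to run inside a HandleScope with 'factory' pointing at the isolate's Factory):

    Handle<FixedArray> elems = factory->NewFixedArray(3);
    Handle<JSArray> array = factory->NewJSArrayWithElements(elems);

    // Storing arbitrary (possibly non-smi) values later requires giving
    // up the smi-only elements kind; the transition may allocate, hence
    // the handlified wrapper instead of the raw JSArray method.
    factory->EnsureCanContainNonSmiElements(array);

    // Swapping in a new backing store is likewise GC-safe through the
    // factory wrapper.
    Handle<FixedArray> bigger = factory->NewFixedArray(8);
    factory->SetContent(array, bigger);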
diff --git a/src/factory.h b/src/factory.h
index c9817fe..0f028e5 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -50,18 +50,16 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new uninitialized fixed double array.
-  Handle<FixedArray> NewFixedDoubleArray(
+  Handle<FixedDoubleArray> NewFixedDoubleArray(
       int size,
       PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<SeededNumberDictionary> NewSeededNumberDictionary(
-      int at_least_space_for);
-
-  Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
-      int at_least_space_for);
+  Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
 
   Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
 
+  Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
+
   Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
 
   Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@@ -149,9 +147,9 @@
   // not make sense to have a UTF-8 factory function for external strings,
   // because we cannot change the underlying buffer.
   Handle<String> NewExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   Handle<String> NewExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
@@ -174,7 +172,7 @@
   // Create a 'block' context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
-                                  Handle<SerializedScopeInfo> scope_info);
+                                  Handle<ScopeInfo> scope_info);
 
   // Return the Symbol matching the passed in string.
   Handle<String> SymbolFromString(Handle<String> value);
@@ -207,7 +205,9 @@
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);
 
-  Handle<Map> NewMap(InstanceType type, int instance_size);
+  Handle<Map> NewMap(InstanceType type,
+                     int instance_size,
+                     ElementsKind elements_kind = FAST_ELEMENTS);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
 
@@ -219,22 +219,22 @@
 
   Handle<Map> CopyMapDropTransitions(Handle<Map> map);
 
-  Handle<Map> GetFastElementsMap(Handle<Map> map);
-
-  Handle<Map> GetSlowElementsMap(Handle<Map> map);
-
-  Handle<Map> GetElementsTransitionMap(Handle<Map> map,
-                                       ElementsKind elements_kind,
-                                       bool safe_to_add_transition);
+  Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+                                       ElementsKind elements_kind);
 
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
+  Handle<FixedDoubleArray> CopyFixedDoubleArray(
+      Handle<FixedDoubleArray> array);
+
   // Numbers (eg, literals) are pretenured by the parser.
   Handle<Object> NewNumber(double value,
                            PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<Object> NewNumberFromInt(int value);
-  Handle<Object> NewNumberFromUint(uint32_t value);
+  Handle<Object> NewNumberFromInt(int32_t value,
+                                  PretenureFlag pretenure = NOT_TENURED);
+  Handle<Object> NewNumberFromUint(uint32_t value,
+                                   PretenureFlag pretenure = NOT_TENURED);
 
   // These objects are used by the api to create env-independent data
   // structures in the heap.
@@ -262,18 +262,24 @@
       Handle<FixedArray> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
+  void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
+
+  void EnsureCanContainNonSmiElements(Handle<JSArray> array);
+
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
   // Change the type of the argument into a JS object/function and reinitialize.
   void BecomeJSObject(Handle<JSReceiver> object);
   void BecomeJSFunction(Handle<JSReceiver> object);
 
+  void SetIdentityHash(Handle<JSObject> object, Object* hash);
+
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
 
   Handle<JSFunction> NewFunctionWithoutPrototype(
       Handle<String> name,
-      StrictModeFlag strict_mode);
+      LanguageMode language_mode);
 
   Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
 
@@ -287,7 +293,7 @@
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
-  Handle<SerializedScopeInfo> NewSerializedScopeInfo(int length);
+  Handle<ScopeInfo> NewScopeInfo(int length);
 
   Handle<Code> NewCode(const CodeDesc& desc,
                        Code::Flags flags,
@@ -360,6 +366,7 @@
       PropertyAttributes attributes);
 
   Handle<String> NumberToString(Handle<Object> number);
+  Handle<String> Uint32ToString(uint32_t value);
 
   enum ApiInstanceType {
     JavaScriptObject,
@@ -404,7 +411,7 @@
       Handle<String> name,
       int number_of_literals,
       Handle<Code> code,
-      Handle<SerializedScopeInfo> scope_info);
+      Handle<ScopeInfo> scope_info);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
 
   Handle<JSMessageObject> NewJSMessageObject(
@@ -416,13 +423,8 @@
       Handle<Object> stack_trace,
       Handle<Object> stack_frames);
 
-  Handle<SeededNumberDictionary> DictionaryAtNumberPut(
-      Handle<SeededNumberDictionary>,
-      uint32_t key,
-      Handle<Object> value);
-
-  Handle<UnseededNumberDictionary> DictionaryAtNumberPut(
-      Handle<UnseededNumberDictionary>,
+  Handle<NumberDictionary> DictionaryAtNumberPut(
+      Handle<NumberDictionary>,
       uint32_t key,
       Handle<Object> value);
 
@@ -451,6 +453,14 @@
                              JSRegExp::Flags flags,
                              int capture_count);
 
+  // Returns the value for a known global constant (a property of the global
+  // object which is neither configurable nor writable) like 'undefined'.
+  // Returns a null handle when the given name is unknown.
+  Handle<Object> GlobalConstantFor(Handle<String> name);
+
+  // Converts the given boolean condition to JavaScript boolean value.
+  Handle<Object> ToBoolean(bool value);
+
  private:
   Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
 
@@ -459,7 +469,7 @@
 
   Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
       Handle<String> name,
-      StrictModeFlag strict_mode);
+      LanguageMode language_mode);
 
   Handle<DescriptorArray> CopyAppendCallbackDescriptors(
       Handle<DescriptorArray> array,
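NewNumberFromInt and NewNumberFromUint now take a PretenureFlag like NewNumber already did: TENURED asks for allocation directly in old space for values expected to survive a scavenge, while the NOT_TENURED default keeps allocation in new space. A usage sketch ('factory' and 'v' assumed in scope):

    // Default: a short-lived value; any heap number lands in new space.
    Handle<Object> scratch = factory->NewNumberFromInt(v);

    // Long-lived value: allocate the heap number directly in old space.
    Handle<Object> cached = factory->NewNumberFromUint(0x80000000u, TENURED);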
diff --git a/src/fast-dtoa.h b/src/fast-dtoa.h
index 94c22ec..ef28557 100644
--- a/src/fast-dtoa.h
+++ b/src/fast-dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,7 +43,7 @@
 
 // FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
 // include the terminating '\0' character.
-static const int kFastDtoaMaximalLength = 17;
+const int kFastDtoaMaximalLength = 17;
 
 // Provides a decimal representation of v.
 // The result should be interpreted as buffer * 10^(point - length).
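Dropping 'static' on kFastDtoaMaximalLength is purely cosmetic: at namespace scope a const object already has internal linkage in C++, so every translation unit including this header still gets its own copy of the constant. The two spellings are equivalent here:

    // Equivalent declarations at namespace scope in a header (pick one):
    // static const int kFastDtoaMaximalLength = 17;  // old spelling
    const int kFastDtoaMaximalLength = 17;            // new spelling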
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index e8f6349..f145df7 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -98,20 +98,23 @@
 
 // Flags for experimental language features.
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
+DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
 DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
-DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
-DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
+DEFINE_bool(harmony_collections, false,
+            "enable harmony collections (sets, maps, and weak maps)")
+DEFINE_bool(harmony, false, "enable all harmony features")
 
 // Flags for experimental implementation features.
 DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(string_slices, false, "use string slices")
+DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
+DEFINE_bool(string_slices, true, "use string slices")
+
+DEFINE_bool(clever_optimizations,
+            true,
+            "Optimize object size, Array shift, DOM strings and string +")
 
 // Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
-  DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
-  DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
+DEFINE_bool(crankshaft, true, "use crankshaft")
 DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
 DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
 DEFINE_bool(build_lithium, true, "use lithium chunk builder")
@@ -125,6 +128,9 @@
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
 DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
+            true,
+            "crankshaft harvests type feedback from stub cache")
 DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
 DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
 DEFINE_bool(trace_inlining, false, "trace inlining decisions")
@@ -180,6 +186,8 @@
 DEFINE_bool(expose_externalize_string, false,
             "expose externalize string extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_bool(builtins_in_stack_traces, false,
+            "show built-in functions in stack traces")
 DEFINE_bool(disable_native_files, false, "disable builtin natives files")
 
 // builtins-ia32.cc
@@ -253,10 +261,16 @@
             "print cumulative GC statistics in name=value format on exit")
 DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
+DEFINE_bool(trace_fragmentation, false,
+            "report fragmentation for old pointer and data pages")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again before full gc")
+DEFINE_bool(incremental_marking, true, "use incremental marking")
+DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_bool(trace_incremental_marking, false,
+            "trace progress of the incremental marking")
 
 // v8.cc
 DEFINE_bool(use_idle_notification, true,
@@ -276,8 +290,13 @@
 
 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
+DEFINE_bool(lazy_sweeping, true,
+            "Use lazy sweeping for old pointer and data spaces")
+DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
+            "Flush code caches in maps during mark compact cycle.")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
+DEFINE_bool(compact_code_space, false, "Compact code space")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
@@ -288,9 +307,6 @@
 DEFINE_bool(canonicalize_object_literal_maps, true,
             "Canonicalize maps for object literals.")
 
-DEFINE_bool(use_big_map_space, true,
-            "Use big map space, but don't compact if it grew too big.")
-
 DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
            "Maximum number of pages in map space which still allows to encode "
            "forwarding pointers.  That's actually a constant, but it's useful "
@@ -305,11 +321,11 @@
 
 // parser.cc
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_bool(strict_mode, true, "allow strict mode directives")
 
 // simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
+DEFINE_bool(check_icache, false,
+            "Check icache flushes in ARM and MIPS simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
@@ -319,14 +335,6 @@
             "print stack trace when throwing exceptions")
 DEFINE_bool(preallocate_message_memory, false,
             "preallocate some memory to build stack traces.")
-DEFINE_bool(randomize_hashes,
-            true,
-            "randomize hashes to avoid predictable hash collisions "
-            "(with snapshots this option cannot override the baked-in seed)")
-DEFINE_int(hash_seed,
-           0,
-           "Fixed seed to use to hash property keys (0 means random)"
-           "(with snapshots this option cannot override the baked-in seed)")
 
 // v8.cc
 DEFINE_bool(preemption, false,
@@ -334,7 +342,6 @@
 
 // Regexp
 DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
-DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
 
 // Testing flags test/cctest/test-{flags,api,serialization}.cc
 DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -356,11 +363,15 @@
 
 DEFINE_bool(help, false, "Print usage message, including flags, on console")
 DEFINE_bool(dump_counters, false, "Dump counters on exit")
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
 DEFINE_bool(debugger, false, "Enable JavaScript debugger")
 DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
 DEFINE_string(map_counters, "", "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
@@ -386,6 +397,15 @@
 DEFINE_string(gdbjit_dump_filter, "",
               "dump only objects containing this substring")
 
+// mark-compact.cc
+DEFINE_bool(force_marking_deque_overflows, false,
+            "force overflows of marking deque by reducing its size "
+            "to 64 words")
+
+DEFINE_bool(stress_compaction, false,
+            "stress the GC compactor to flush out bugs (implies "
+            "--force_marking_deque_overflows)")
+
 //
 // Debug only flags
 //
@@ -412,7 +432,6 @@
 DEFINE_bool(print_builtin_json_ast, false,
             "print source AST for builtins as JSON")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
 
 // compiler.cc
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -509,6 +528,9 @@
 #define FLAG FLAG_READONLY
 #endif
 
+// elements.cc
+DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+
 // code-stubs.cc
 DEFINE_bool(print_code_stubs, false, "print code stubs")
 
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 7ba79bf..af3ae3d 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -68,7 +68,7 @@
 
 inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
   v->VisitPointer(context_address());
-  StackFrame::IteratePc(v, pc_address(), holder);
+  v->VisitPointer(code_address());
 }
 
 
@@ -77,9 +77,24 @@
 }
 
 
-inline StackHandler::State StackHandler::state() const {
+inline bool StackHandler::is_entry() const {
+  return kind() == ENTRY;
+}
+
+
+inline bool StackHandler::is_try_catch() const {
+  return kind() == TRY_CATCH;
+}
+
+
+inline bool StackHandler::is_try_finally() const {
+  return kind() == TRY_FINALLY;
+}
+
+
+inline StackHandler::Kind StackHandler::kind() const {
   const int offset = StackHandlerConstants::kStateOffset;
-  return static_cast<State>(Memory::int_at(address() + offset));
+  return KindField::decode(Memory::unsigned_at(address() + offset));
 }
 
 
@@ -89,9 +104,9 @@
 }
 
 
-inline Address* StackHandler::pc_address() const {
-  const int offset = StackHandlerConstants::kPCOffset;
-  return reinterpret_cast<Address*>(address() + offset);
+inline Object** StackHandler::code_address() const {
+  const int offset = StackHandlerConstants::kCodeOffset;
+  return reinterpret_cast<Object**>(address() + offset);
 }
 
 
@@ -105,8 +120,33 @@
 }
 
 
+inline Code* StackFrame::LookupCode() const {
+  return GetContainingCode(isolate(), pc());
+}
+
+
 inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
-  return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
+  return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
+}
+
+
+inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+    : EntryFrame(iterator) {
+}
+
+
+inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
+}
+
+
+inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
 }
 
 
@@ -155,6 +195,11 @@
 }
 
 
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
+}
+
+
 Address JavaScriptFrame::GetParameterSlot(int index) const {
   int param_count = ComputeParametersCount();
   ASSERT(-1 <= index && index < param_count);
@@ -190,6 +235,26 @@
 }
 
 
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+    : JavaScriptFrame(iterator) {
+}
+
+
+inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
+    StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+}
+
+
+inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
+}
+
+
+inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+    : InternalFrame(iterator) {
+}
+
+
 template<typename Iterator>
 inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
     Isolate* isolate)
@@ -197,6 +262,15 @@
   if (!done()) Advance();
 }
 
+
+template<typename Iterator>
+inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+    Isolate* isolate, ThreadLocalTop* top)
+    : iterator_(isolate, top) {
+  if (!done()) Advance();
+}
+
+
 template<typename Iterator>
 inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
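StackHandler::kind() above decodes the handler kind from a packed state word through KindField::decode. BitField-style helpers of this kind generally look like the sketch below (the shift/size layout is illustrative, not necessarily V8's actual KindField parameters):

    template <class T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1U << size) - 1) << shift;
      static T decode(unsigned value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    enum Kind { ENTRY, TRY_CATCH, TRY_FINALLY };
    typedef BitField<Kind, 0, 3> KindField;  // kind lives in the low bits

    // KindField::decode(state_word) recovers the Kind stored in those bits.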
diff --git a/src/frames.cc b/src/frames.cc
index 60b1aad..9fd0042 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -366,16 +366,17 @@
 
 
 Code* StackFrame::GetSafepointData(Isolate* isolate,
-                                   Address pc,
+                                   Address inner_pointer,
                                    SafepointEntry* safepoint_entry,
                                    unsigned* stack_slots) {
-  PcToCodeCache::PcToCodeCacheEntry* entry =
-      isolate->pc_to_code_cache()->GetCacheEntry(pc);
+  InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
+      isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
   if (!entry->safepoint_entry.is_valid()) {
-    entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
+    entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
     ASSERT(entry->safepoint_entry.is_valid());
   } else {
-    ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
+    ASSERT(entry->safepoint_entry.Equals(
+        entry->code->GetSafepointEntry(inner_pointer)));
   }
 
   // Fill in the results and return the code.
@@ -392,11 +393,16 @@
 }
 
 
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* object, Address addr);
+#endif
+
+
 void StackFrame::IteratePc(ObjectVisitor* v,
                            Address* pc_address,
                            Code* holder) {
   Address pc = *pc_address;
-  ASSERT(holder->contains(pc));
+  ASSERT(GcSafeCodeContains(holder, pc));
   unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
   Object* code = holder;
   v->VisitPointer(&code);
@@ -705,6 +711,69 @@
 }
 
 
+void JavaScriptFrame::PrintTop(FILE* file,
+                               bool print_args,
+                               bool print_line_number) {
+  // constructor calls
+  HandleScope scope;
+  AssertNoAllocation no_allocation;
+  JavaScriptFrameIterator it;
+  while (!it.done()) {
+    if (it.frame()->is_java_script()) {
+      JavaScriptFrame* frame = it.frame();
+      if (frame->IsConstructor()) PrintF(file, "new ");
+      // function name
+      Object* fun = frame->function();
+      if (fun->IsJSFunction()) {
+        SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
+        shared->DebugName()->ShortPrint(file);
+        if (print_line_number) {
+          Address pc = frame->pc();
+          Code* code = Code::cast(
+              v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
+          int source_pos = code->SourcePosition(pc);
+          Object* maybe_script = shared->script();
+          if (maybe_script->IsScript()) {
+            Handle<Script> script(Script::cast(maybe_script));
+            int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+            Object* script_name_raw = script->name();
+            if (script_name_raw->IsString()) {
+              String* script_name = String::cast(script->name());
+              SmartArrayPointer<char> c_script_name =
+                  script_name->ToCString(DISALLOW_NULLS,
+                                         ROBUST_STRING_TRAVERSAL);
+              PrintF(file, " at %s:%d", *c_script_name, line);
+            } else {
+              PrintF(file, " at <unknown>:%d", line);
+            }
+          } else {
+            PrintF(file, " at <unknown>:<unknown>");
+          }
+        }
+      } else {
+        fun->ShortPrint(file);
+      }
+
+      if (print_args) {
+        // Function arguments.
+        // (We intentionally print only the arguments actually supplied,
+        // not all of the declared formal parameters.)
+        PrintF(file, "(this=");
+        frame->receiver()->ShortPrint(file);
+        const int length = frame->ComputeParametersCount();
+        for (int i = 0; i < length; i++) {
+          PrintF(file, ", ");
+          frame->GetParameter(i)->ShortPrint(file);
+        }
+        PrintF(file, ")");
+      }
+      break;
+    }
+    it.Advance();
+  }
+}
+
+
 void FrameSummary::Print() {
   PrintF("receiver: ");
   receiver_->ShortPrint();
@@ -819,7 +888,8 @@
   // back to a slow search in this case to find the original optimized
   // code object.
   if (!code->contains(pc())) {
-    code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
+    code = isolate()->inner_pointer_to_code_cache()->
+        GcSafeFindCodeForInnerPointer(pc());
   }
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -881,6 +951,11 @@
 }
 
 
+int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
+  return Smi::cast(GetExpression(0))->value();
+}
+
+
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
@@ -927,11 +1002,15 @@
   if (IsConstructor()) accumulator->Add("new ");
   accumulator->PrintFunction(function, receiver, &code);
 
-  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
+  // Get scope information for nicer output, if possible. If code is NULL
+  // or carries no scope info, scope_info will simply report zero
+  // parameters, zero stack-allocated locals, and zero context-allocated
+  // locals.
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
   if (function->IsJSFunction()) {
     Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
-    scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
+    scope_info = Handle<ScopeInfo>(shared->scope_info());
     Object* script_obj = shared->script();
     if (script_obj->IsScript()) {
       Handle<Script> script(Script::cast(script_obj));
@@ -956,11 +1035,6 @@
 
   accumulator->Add("(this=%o", receiver);
 
-  // Get scope information for nicer output, if possible. If code is
-  // NULL, or doesn't contain scope info, info will return 0 for the
-  // number of parameters, stack slots, or context slots.
-  ScopeInfo<PreallocatedStorage> info(*scope_info);
-
   // Print the parameters.
   int parameters_count = ComputeParametersCount();
   for (int i = 0; i < parameters_count; i++) {
@@ -968,8 +1042,8 @@
     // If we have a name for the parameter we print it. Nameless
     // parameters are either because we have more actual parameters
     // than formal parameters or because we have no scope information.
-    if (i < info.number_of_parameters()) {
-      accumulator->PrintName(*info.parameter_name(i));
+    if (i < scope_info->ParameterCount()) {
+      accumulator->PrintName(scope_info->ParameterName(i));
       accumulator->Add("=");
     }
     accumulator->Add("%o", GetParameter(i));
@@ -987,8 +1061,8 @@
   accumulator->Add(" {\n");
 
   // Compute the number of locals and expression stack elements.
-  int stack_locals_count = info.number_of_stack_slots();
-  int heap_locals_count = info.number_of_context_slots();
+  int stack_locals_count = scope_info->StackLocalCount();
+  int heap_locals_count = scope_info->ContextLocalCount();
   int expressions_count = ComputeExpressionsCount();
 
   // Print stack-allocated local variables.
@@ -997,7 +1071,7 @@
   }
   for (int i = 0; i < stack_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(*info.stack_slot_name(i));
+    accumulator->PrintName(scope_info->StackLocalName(i));
     accumulator->Add(" = ");
     if (i < expressions_count) {
       accumulator->Add("%o", GetExpression(i));
@@ -1014,16 +1088,16 @@
   }
 
   // Print heap-allocated local variables.
-  if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
+  if (heap_locals_count > 0) {
     accumulator->Add("  // heap-allocated locals\n");
   }
-  for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
+  for (int i = 0; i < heap_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(*info.context_slot_name(i));
+    accumulator->PrintName(scope_info->ContextLocalName(i));
     accumulator->Add(" = ");
     if (context != NULL) {
       if (i < context->length()) {
-        accumulator->Add("%o", context->get(i));
+        accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
       } else {
         accumulator->Add(
             "// warning: missing context slot - inconsistent frame?");
@@ -1155,53 +1229,89 @@
 // -------------------------------------------------------------------------
 
 
-Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+  MapWord map_word = object->map_word();
+  return map_word.IsForwardingAddress() ?
+      map_word.ToForwardingAddress()->map() : map_word.ToMap();
+}
+
+
+static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
+}
+
+
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* code, Address addr) {
+  Map* map = GcSafeMapOfCodeSpaceObject(code);
+  ASSERT(map == code->GetHeap()->code_map());
+  Address start = code->address();
+  Address end = code->address() + code->SizeFromMap(map);
+  return start <= addr && addr < end;
+}
+#endif
+
+
+Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
+                                                Address inner_pointer) {
   Code* code = reinterpret_cast<Code*>(object);
-  ASSERT(code != NULL && code->contains(pc));
+  ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
   return code;
 }
 
 
-Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
+    Address inner_pointer) {
   Heap* heap = isolate_->heap();
-  // Check if the pc points into a large object chunk.
-  LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
-  if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+  // Check if the inner pointer points into a large object chunk.
+  LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
+  if (large_page != NULL) {
+    return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
+  }
 
-  // Iterate through the 8K page until we reach the end or find an
-  // object starting after the pc.
-  Page* page = Page::FromAddress(pc);
-  HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
-  HeapObject* previous = NULL;
+  // Iterate through the page until we reach the end or find an object starting
+  // after the inner pointer.
+  Page* page = Page::FromAddress(inner_pointer);
+
+  Address addr = page->skip_list()->StartFor(inner_pointer);
+
+  Address top = heap->code_space()->top();
+  Address limit = heap->code_space()->limit();
+
   while (true) {
-    HeapObject* next = iterator.next();
-    if (next == NULL || next->address() >= pc) {
-      return GcSafeCastToCode(previous, pc);
+    if (addr == top && addr != limit) {
+      addr = limit;
+      continue;
     }
-    previous = next;
+
+    HeapObject* obj = HeapObject::FromAddress(addr);
+    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
+    Address next_addr = addr + obj_size;
+    if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
+    addr = next_addr;
   }
 }
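
A minimal sketch of the scan above, with hypothetical toy types rather than V8's: given a starting address known to be at or before the enclosing object (the role of SkipList::StartFor) and a GC-safe way to size each object, the loop walks objects laid out back to back until one's extent covers the inner pointer. The top/limit gap and the forwarding-pointer handling in GcSafeMapOfCodeSpaceObject are V8 specifics omitted here.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy model: a "page" of objects laid out back to back. size_at() plays
    // the role of GcSafeSizeOfCodeSpaceObject(); in V8 it must read the map
    // word carefully because it may be a forwarding pointer during GC.
    struct ToyPage {
      const uint8_t* begin;
      const uint8_t* end;
      size_t (*size_at)(const uint8_t* addr);
    };

    // Returns the start of the object containing inner_pointer, scanning
    // from a hint at or before the enclosing object's start.
    const uint8_t* FindObjectForInnerPointer(const ToyPage& page,
                                             const uint8_t* hint,
                                             const uint8_t* inner_pointer) {
      assert(page.begin <= hint && hint <= inner_pointer &&
             inner_pointer < page.end);
      const uint8_t* addr = hint;
      for (;;) {
        const uint8_t* next = addr + page.size_at(addr);
        if (next > inner_pointer) return addr;  // [addr, next) covers it.
        addr = next;
      }
    }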
 
 
-PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
+    InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
   isolate_->counters()->pc_to_code()->Increment();
-  ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+  ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)),
-      v8::internal::kZeroHashSeed);
-  uint32_t index = hash & (kPcToCodeCacheSize - 1);
-  PcToCodeCacheEntry* entry = cache(index);
-  if (entry->pc == pc) {
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
+  uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
+  InnerPointerToCodeCacheEntry* entry = cache(index);
+  if (entry->inner_pointer == inner_pointer) {
     isolate_->counters()->pc_to_code_cached()->Increment();
-    ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+    ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
   } else {
     // Because this code may be interrupted by a profiling signal that
-    // also queries the cache, we cannot update pc before the code has
-    // been set. Otherwise, we risk trying to use a cache entry before
+    // also queries the cache, we cannot update inner_pointer before the code
+    // has been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
-    entry->code = GcSafeFindCodeForPc(pc);
+    entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
     entry->safepoint_entry.Reset();
-    entry->pc = pc;
+    entry->inner_pointer = inner_pointer;
   }
   return entry;
 }
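
The comment in GetCacheEntry pins down a subtle ordering requirement: a profiling signal can interrupt this code and read the same direct-mapped cache, so the value (code) must be written before the key (inner_pointer) is published. A stand-alone sketch of that discipline, with hypothetical names and ignoring compiler reordering, which a production version would need to address separately:

    #include <cstdint>

    struct Entry {
      uintptr_t inner_pointer;  // Key: published last.
      const void* code;         // Value: written first.
    };

    const int kCacheSize = 1024;  // Power of two, so the mask below works.
    static Entry cache[kCacheSize];

    const void* Lookup(uintptr_t inner_pointer,
                       const void* (*slow_find)(uintptr_t)) {
      uint32_t hash = static_cast<uint32_t>(inner_pointer) * 2654435761u;
      Entry* entry = &cache[hash & (kCacheSize - 1)];
      if (entry->inner_pointer != inner_pointer) {
        entry->code = slow_find(inner_pointer);  // Write the value first...
        entry->inner_pointer = inner_pointer;    // ...then publish the key.
      }
      return entry->code;
    }

A stale key at worst forces the slow-path recompute; the ordering only has to guarantee that a matching key never points at a not-yet-computed value.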
diff --git a/src/frames.h b/src/frames.h
index fed11c4..2c5e571 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -49,47 +49,52 @@
 class ThreadLocalTop;
 class Isolate;
 
-class PcToCodeCache {
+class InnerPointerToCodeCache {
  public:
-  struct PcToCodeCacheEntry {
-    Address pc;
+  struct InnerPointerToCodeCacheEntry {
+    Address inner_pointer;
     Code* code;
     SafepointEntry safepoint_entry;
   };
 
-  explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
+  explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
     Flush();
   }
 
-  Code* GcSafeFindCodeForPc(Address pc);
-  Code* GcSafeCastToCode(HeapObject* object, Address pc);
+  Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+  Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
 
   void Flush() {
     memset(&cache_[0], 0, sizeof(cache_));
   }
 
-  PcToCodeCacheEntry* GetCacheEntry(Address pc);
+  InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
 
  private:
-  PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+  InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
 
   Isolate* isolate_;
 
-  static const int kPcToCodeCacheSize = 1024;
-  PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+  static const int kInnerPointerToCodeCacheSize = 1024;
+  InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
 
-  DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
+  DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
 };
 
 
 class StackHandler BASE_EMBEDDED {
  public:
-  enum State {
+  enum Kind {
     ENTRY,
     TRY_CATCH,
     TRY_FINALLY
   };
 
+  static const int kKindWidth = 2;
+  static const int kOffsetWidth = 32 - kKindWidth;
+  class KindField: public BitField<StackHandler::Kind, 0, kKindWidth> {};
+  class OffsetField: public BitField<unsigned, kKindWidth, kOffsetWidth> {};
+
   // Get the address of this stack handler.
   inline Address address() const;
 
@@ -106,16 +111,16 @@
   static inline StackHandler* FromAddress(Address address);
 
   // Testers
-  bool is_entry() { return state() == ENTRY; }
-  bool is_try_catch() { return state() == TRY_CATCH; }
-  bool is_try_finally() { return state() == TRY_FINALLY; }
+  inline bool is_entry() const;
+  inline bool is_try_catch() const;
+  inline bool is_try_finally() const;
 
  private:
   // Accessors.
-  inline State state() const;
+  inline Kind kind() const;
 
   inline Object** context_address() const;
-  inline Address* pc_address() const;
+  inline Object** code_address() const;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
 };
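
The new Kind/Offset bit fields pack a handler's kind and a 30-bit code offset into a single 32-bit word. A simplified, self-contained version of the encode/decode scheme (the BitField template below is a stand-in for the one in V8's utils.h, not the real definition):

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    enum Kind { ENTRY, TRY_CATCH, TRY_FINALLY };
    const int kKindWidth = 2;
    typedef BitField<Kind, 0, kKindWidth> KindField;
    typedef BitField<unsigned, kKindWidth, 32 - kKindWidth> OffsetField;

    int main() {
      uint32_t state = KindField::encode(TRY_CATCH) | OffsetField::encode(48);
      assert(KindField::decode(state) == TRY_CATCH);
      assert(OffsetField::decode(state) == 48);
      return 0;
    }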
@@ -139,7 +144,10 @@
   enum Type {
     NONE = 0,
     STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
-    NUMBER_OF_TYPES
+    NUMBER_OF_TYPES,
+    // Used by FrameScope to indicate that the stack frame is constructed
+    // manually and the FrameScope does not need to emit code.
+    MANUAL
   };
 #undef DECLARE_TYPE
 
@@ -215,9 +223,7 @@
   virtual Code* unchecked_code() const = 0;
 
   // Get the code associated with this frame.
-  Code* LookupCode() const {
-    return GetContainingCode(isolate(), pc());
-  }
+  inline Code* LookupCode() const;
 
   // Get the code object that contains the given pc.
   static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@@ -299,7 +305,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
  protected:
-  explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+  inline explicit EntryFrame(StackFrameIterator* iterator);
 
   // The caller stack pointer for entry frames is always zero. The
   // real information about the caller frame is available through the
@@ -326,8 +332,7 @@
   }
 
  protected:
-  explicit EntryConstructFrame(StackFrameIterator* iterator)
-      : EntryFrame(iterator) { }
+  inline explicit EntryConstructFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -361,7 +366,7 @@
   static void FillState(Address fp, Address sp, State* state);
 
  protected:
-  explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+  inline explicit ExitFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -394,8 +399,7 @@
   }
 
  protected:
-  explicit StandardFrame(StackFrameIterator* iterator)
-      : StackFrame(iterator) { }
+  inline explicit StandardFrame(StackFrameIterator* iterator);
 
   virtual void ComputeCallerState(State* state) const;
 
@@ -513,9 +517,10 @@
     return static_cast<JavaScriptFrame*>(frame);
   }
 
+  static void PrintTop(FILE* file, bool print_args, bool print_line_number);
+
  protected:
-  explicit JavaScriptFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator) { }
+  inline explicit JavaScriptFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -552,8 +557,7 @@
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
 
  protected:
-  explicit OptimizedFrame(StackFrameIterator* iterator)
-      : JavaScriptFrame(iterator) { }
+  inline explicit OptimizedFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -581,12 +585,9 @@
                      int index) const;
 
  protected:
-  explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
-      : JavaScriptFrame(iterator) { }
+  inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
 
-  virtual int GetNumberOfIncomingArguments() const {
-    return Smi::cast(GetExpression(0))->value();
-  }
+  virtual int GetNumberOfIncomingArguments() const;
 
   virtual Address GetCallerStackPointer() const;
 
@@ -611,8 +612,7 @@
   }
 
  protected:
-  explicit InternalFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator) { }
+  inline explicit InternalFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -633,8 +633,7 @@
   }
 
  protected:
-  explicit ConstructFrame(StackFrameIterator* iterator)
-      : InternalFrame(iterator) { }
+  inline explicit ConstructFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -710,20 +709,26 @@
 
   inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
 
+  inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
+
   // Skip frames until the frame with the given id is reached.
   explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
 
   inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
 
-  JavaScriptFrameIteratorTemp(Address fp, Address sp,
-                              Address low_bound, Address high_bound) :
+  JavaScriptFrameIteratorTemp(Address fp,
+                              Address sp,
+                              Address low_bound,
+                              Address high_bound) :
       iterator_(fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
 
   JavaScriptFrameIteratorTemp(Isolate* isolate,
-                              Address fp, Address sp,
-                              Address low_bound, Address high_bound) :
+                              Address fp,
+                              Address sp,
+                              Address low_bound,
+                              Address high_bound) :
       iterator_(isolate, fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 8073874..04086d4 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -244,11 +244,6 @@
 }
 
 
-void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
-  Visit(expr->expression());
-}
-
-
 void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
   Visit(expr->left());
   Visit(expr->right());
@@ -291,12 +286,16 @@
   code->set_optimizable(info->IsOptimizable());
   cgen.PopulateDeoptimizationData(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+  code->set_handler_table(*cgen.handler_table());
+#ifdef ENABLE_DEBUGGER_SUPPORT
   code->set_has_debug_break_slots(
       info->isolate()->debugger()->IsDebuggerActive());
+  code->set_compiled_optimizable(info->IsOptimizable());
+#endif  // ENABLE_DEBUGGER_SUPPORT
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
-  info->SetCode(code);  // may be an empty handle.
+  info->SetCode(code);  // May be an empty handle.
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
@@ -363,7 +362,7 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
   // There's no need to prepare this code for bailouts from already optimized
   // code or code that can't be optimized.
   if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
@@ -384,10 +383,11 @@
 }
 
 
-void FullCodeGenerator::RecordStackCheck(int ast_id) {
+void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
   // The pc offset does not need to be encoded and packed together with a
   // state.
-  BailoutEntry entry = { ast_id, masm_->pc_offset() };
+  ASSERT(masm_->pc_offset() > 0);
+  BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
   stack_checks_.Add(entry);
 }
 
@@ -412,27 +412,24 @@
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
   __ push(reg);
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Register reg) const {
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::PlugTOS() const {
   __ Drop(1);
-  codegen()->decrement_stack_height();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
   __ pop(result_register());
-  codegen()->decrement_stack_height();
 }
 
 
@@ -443,8 +440,7 @@
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
-  codegen()->decrement_stack_height();
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -523,8 +519,8 @@
       if (var->IsUnallocated()) {
         array->set(j++, *(var->name()));
         if (decl->fun() == NULL) {
-          if (var->mode() == Variable::CONST) {
-            // In case this is const property use the hole.
+          if (var->binding_needs_init()) {
+            // In case this binding needs initialization use the hole.
             array->set_the_hole(j++);
           } else {
             array->set_undefined(j++);
@@ -549,11 +545,10 @@
 
 
 int FullCodeGenerator::DeclareGlobalsFlags() {
-  int flags = 0;
-  if (is_eval()) flags |= kDeclareGlobalsEvalFlag;
-  if (is_strict_mode()) flags |= kDeclareGlobalsStrictModeFlag;
-  if (is_native()) flags |= kDeclareGlobalsNativeFlag;
-  return flags;
+  ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
+  return DeclareGlobalsEvalFlag::encode(is_eval()) |
+      DeclareGlobalsNativeFlag::encode(is_native()) |
+      DeclareGlobalsLanguageMode::encode(language_mode());
 }
 
 
@@ -659,14 +654,13 @@
 }
 
 
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
-  ZoneList<Expression*>* args = node->arguments();
-  const Runtime::Function* function = node->function();
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  const Runtime::Function* function = expr->function();
   ASSERT(function != NULL);
   ASSERT(function->intrinsic_type == Runtime::INLINE);
   InlineFunctionGenerator generator =
       FindInlineFunctionGenerator(function->function_id);
-  ((*this).*(generator))(args);
+  ((*this).*(generator))(expr);
 }
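
EmitInlineRuntimeCall dispatches through a table of member function pointers using the ((*this).*(generator))(expr) call syntax. A minimal illustration of that pattern with made-up names (kTable, Dispatch, and the Emit* bodies are not V8's):

    #include <cstdio>

    class Gen {
     public:
      typedef void (Gen::*Generator)(int arg);

      void EmitIsSmi(int arg)   { std::printf("IsSmi(%d)\n", arg); }
      void EmitIsArray(int arg) { std::printf("IsArray(%d)\n", arg); }

      void Dispatch(int id, int arg) {
        static const Generator kTable[] = {&Gen::EmitIsSmi, &Gen::EmitIsArray};
        Generator generator = kTable[id];
        ((*this).*(generator))(arg);  // Same call form as above.
      }
    };

    int main() {
      Gen g;
      g.Dispatch(0, 7);   // prints IsSmi(7)
      g.Dispatch(1, 9);   // prints IsArray(9)
      return 0;
    }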
 
 
@@ -683,11 +677,25 @@
 }
 
 
+void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
+  if (context()->IsEffect()) {
+    VisitForEffect(expr);
+  } else if (context()->IsAccumulatorValue()) {
+    VisitForAccumulatorValue(expr);
+  } else if (context()->IsStackValue()) {
+    VisitForStackValue(expr);
+  } else if (context()->IsTest()) {
+    const TestContext* test = TestContext::cast(context());
+    VisitForControl(expr, test->true_label(), test->false_label(),
+                    test->fall_through());
+  }
+}
+
+
 void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ Comma");
   VisitForEffect(expr->left());
-  if (context()->IsTest()) ForwardBailoutToChild(expr);
-  VisitInCurrentContext(expr->right());
+  VisitInDuplicateContext(expr->right());
 }
 
 
@@ -709,7 +717,6 @@
     }
     PrepareForBailoutForId(right_id, NO_REGISTERS);
     __ bind(&eval_right);
-    ForwardBailoutToChild(expr);
 
   } else if (context()->IsAccumulatorValue()) {
     VisitForAccumulatorValue(left);
@@ -717,7 +724,6 @@
     // case we need it.
     __ push(result_register());
     Label discard, restore;
-    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &restore, &restore);
     } else {
@@ -736,7 +742,6 @@
     // case we need it.
     __ push(result_register());
     Label discard;
-    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &done, &discard);
     } else {
@@ -758,7 +763,7 @@
     __ bind(&eval_right);
   }
 
-  VisitInCurrentContext(right);
+  VisitInDuplicateContext(right);
   __ bind(&done);
 }
 
@@ -785,34 +790,6 @@
 }
 
 
-void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
-  if (!info_->HasDeoptimizationSupport()) return;
-  ASSERT(context()->IsTest());
-  ASSERT(expr == forward_bailout_stack_->expr());
-  forward_bailout_pending_ = forward_bailout_stack_;
-}
-
-
-void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
-  if (context()->IsTest()) {
-    ForwardBailoutStack stack(expr, forward_bailout_pending_);
-    ForwardBailoutStack* saved = forward_bailout_stack_;
-    forward_bailout_pending_ = NULL;
-    forward_bailout_stack_ = &stack;
-    Visit(expr);
-    forward_bailout_stack_ = saved;
-  } else {
-    ASSERT(forward_bailout_pending_ == NULL);
-    Visit(expr);
-    State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
-    PrepareForBailout(expr, state);
-    // Forwarding bailouts to children is a one shot operation. It should have
-    // been processed at this point.
-    ASSERT(forward_bailout_pending_ == NULL);
-  }
-}
-
-
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   NestedBlock nested_block(this, stmt);
@@ -823,9 +800,18 @@
   if (stmt->block_scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
       scope_ = stmt->block_scope();
-      __ Push(scope_->GetSerializedScopeInfo());
+      Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+      int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+      __ Push(scope_info);
       PushFunctionArgumentForContextAllocation();
-      __ CallRuntime(Runtime::kPushBlockContext, 2);
+      if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+        FastNewBlockContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kPushBlockContext, 2);
+      }
+
+      // Replace the context stored in the frame.
       StoreToFrameField(StandardFrameConstants::kContextOffset,
                         context_register());
     }
@@ -972,7 +958,6 @@
   VisitForStackValue(stmt->expression());
   PushFunctionArgumentForContextAllocation();
   __ CallRuntime(Runtime::kPushWithContext, 2);
-  decrement_stack_height();
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   { WithOrCatch body(this);
@@ -1103,20 +1088,17 @@
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Comment cmnt(masm_, "[ TryCatchStatement");
   SetStatementPosition(stmt);
-  // The try block adds a handler to the exception handler chain
-  // before entering, and removes it again when exiting normally.
-  // If an exception is thrown during execution of the try block,
-  // control is passed to the handler, which also consumes the handler.
-  // At this point, the exception is in a register, and store it in
-  // the temporary local variable (prints as ".catch-var") before
-  // executing the catch block. The catch block has been rewritten
-  // to introduce a new scope to bind the catch variable and to remove
-  // that scope again afterwards.
+  // The try block adds a handler to the exception handler chain before
+  // entering, and removes it again when exiting normally.  If an exception
+  // is thrown during execution of the try block, the handler is consumed
+  // and control is passed to the catch block with the exception in the
+  // result register.
 
-  Label try_handler_setup, done;
-  __ Call(&try_handler_setup);
-  // Try handler code, exception in result register.
-
+  Label try_entry, handler_entry, exit;
+  __ jmp(&try_entry);
+  __ bind(&handler_entry);
+  handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
+  // Exception handler code, the exception is in the result register.
   // Extend the context before executing the catch block.
   { Comment cmnt(masm_, "[ Extend catch context");
     __ Push(stmt->variable()->name());
@@ -1130,27 +1112,23 @@
   Scope* saved_scope = scope();
   scope_ = stmt->scope();
   ASSERT(scope_->declarations()->is_empty());
-  { WithOrCatch body(this);
+  { WithOrCatch catch_body(this);
     Visit(stmt->catch_block());
   }
   // Restore the context.
   LoadContextField(context_register(), Context::PREVIOUS_INDEX);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
   scope_ = saved_scope;
-  __ jmp(&done);
+  __ jmp(&exit);
 
   // Try block code. Sets up the exception handler chain.
-  __ bind(&try_handler_setup);
-  {
-    const int delta = StackHandlerConstants::kSize / kPointerSize;
-    TryCatch try_block(this);
-    __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
-    increment_stack_height(delta);
+  __ bind(&try_entry);
+  __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER, stmt->index());
+  { TryCatch try_body(this);
     Visit(stmt->try_block());
-    __ PopTryHandler();
-    decrement_stack_height(delta);
   }
-  __ bind(&done);
+  __ PopTryHandler();
+  __ bind(&exit);
 }
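
The rewritten try/catch no longer pushes a handler return address with __ Call; instead each try statement has an index, and the generator records the handler's code offset in a per-function handler table (the handler_table()->set(...) call above) so the unwinder can resolve index to offset at throw time. A toy model of that table, under the assumption that offsets fit in 32 bits (the real table stores Smis in a FixedArray):

    #include <cstdint>
    #include <vector>

    // index -> handler code offset; filled in while generating code.
    struct HandlerTable {
      std::vector<uint32_t> offsets;

      void Record(int index, uint32_t handler_pos) {
        if (offsets.size() <= static_cast<size_t>(index)) {
          offsets.resize(index + 1);
        }
        offsets[index] = handler_pos;
      }

      // Consulted at unwind time instead of a pushed return address.
      uint32_t HandlerOffset(int index) const { return offsets[index]; }
    };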
 
 
@@ -1162,12 +1140,12 @@
   //
   // The try-finally construct can enter the finally block in three ways:
   // 1. By exiting the try-block normally. This removes the try-handler and
-  //      calls the finally block code before continuing.
+  //    calls the finally block code before continuing.
   // 2. By exiting the try-block with a function-local control flow transfer
   //    (break/continue/return). The site of the break, for example, removes
   //    the try handler and calls the finally block code before continuing
   //    its outward control transfer.
-  // 3. by exiting the try-block with a thrown exception.
+  // 3. By exiting the try-block with a thrown exception.
   //    This can happen in nested function calls. It traverses the try-handler
   //    chain and consumes the try-handler entry before jumping to the
   //    handler code. The handler code then calls the finally-block before
@@ -1178,49 +1156,39 @@
   // exception) in the result register (rax/eax/r0), both of which must
   // be preserved. The return address isn't GC-safe, so it should be
   // cooked before GC.
-  Label finally_entry;
-  Label try_handler_setup;
-  const int original_stack_height = stack_height();
+  Label try_entry, handler_entry, finally_entry;
 
-  // Setup the try-handler chain. Use a call to
-  // Jump to try-handler setup and try-block code. Use call to put try-handler
-  // address on stack.
-  __ Call(&try_handler_setup);
-  // Try handler code. Return address of call is pushed on handler stack.
-  {
-    // This code is only executed during stack-handler traversal when an
-    // exception is thrown. The exception is in the result register, which
-    // is retained by the finally block.
-    // Call the finally block and then rethrow the exception if it returns.
-    __ Call(&finally_entry);
-    __ push(result_register());
-    __ CallRuntime(Runtime::kReThrow, 1);
-  }
+  // Jump to try-handler setup and try-block code.
+  __ jmp(&try_entry);
+  __ bind(&handler_entry);
+  handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
+  // Exception handler code.  This code is only executed when an exception
+  // is thrown.  The exception is in the result register, and must be
+  // preserved by the finally block.  Call the finally block and then
+  // rethrow the exception if it returns.
+  __ Call(&finally_entry);
+  __ push(result_register());
+  __ CallRuntime(Runtime::kReThrow, 1);
 
+  // Finally block implementation.
   __ bind(&finally_entry);
-  {
-    // Finally block implementation.
-    Finally finally_block(this);
-    EnterFinallyBlock();
-    set_stack_height(original_stack_height + Finally::kElementCount);
+  EnterFinallyBlock();
+  { Finally finally_body(this);
     Visit(stmt->finally_block());
-    ExitFinallyBlock();  // Return to the calling code.
   }
+  ExitFinallyBlock();  // Return to the calling code.
 
-  __ bind(&try_handler_setup);
-  {
-    // Setup try handler (stack pointer registers).
-    const int delta = StackHandlerConstants::kSize / kPointerSize;
-    TryFinally try_block(this, &finally_entry);
-    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
-    set_stack_height(original_stack_height + delta);
+  // Set up the try handler.
+  __ bind(&try_entry);
+  __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index());
+  { TryFinally try_body(this, &finally_entry);
     Visit(stmt->try_block());
-    __ PopTryHandler();
-    set_stack_height(original_stack_height);
   }
+  __ PopTryHandler();
   // Execute the finally block on the way out.  Clobber the unpredictable
-  // value in the accumulator with one that's safe for GC.  The finally
-  // block will unconditionally preserve the accumulator on the stack.
+  // value in the result register with one that's safe for GC because the
+  // finally block will unconditionally preserve the result register on the
+  // stack.
   ClearAccumulator();
   __ Call(&finally_entry);
 }
@@ -1246,7 +1214,6 @@
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
-  int start_stack_height = stack_height();
   if (context()->IsTest()) {
     const TestContext* for_test = TestContext::cast(context());
     VisitForControl(expr->then_expression(),
@@ -1254,17 +1221,15 @@
                     for_test->false_label(),
                     NULL);
   } else {
-    VisitInCurrentContext(expr->then_expression());
+    VisitInDuplicateContext(expr->then_expression());
     __ jmp(&done);
   }
 
   PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
-  set_stack_height(start_stack_height);
-  if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  VisitInCurrentContext(expr->else_expression());
+  VisitInDuplicateContext(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1301,11 +1266,8 @@
 
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
-  // Throw has no effect on the stack height or the current expression context.
-  // Usually the expression context is null, because throw is a statement.
   VisitForStackValue(expr->exception());
   __ CallRuntime(Runtime::kThrow, 1);
-  decrement_stack_height();
   // Never returns here.
 }
 
@@ -1321,19 +1283,21 @@
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  Expression *expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
+  Expression *sub_expr;
   Handle<String> check;
-  if (compare->IsLiteralCompareTypeof(&expr, &check)) {
-    EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    EmitLiteralCompareTypeof(expr, sub_expr, check);
     return true;
   }
 
-  if (compare->IsLiteralCompareUndefined(&expr)) {
-    EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+    return true;
+  }
+
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    EmitLiteralCompareNil(expr, sub_expr, kNullValue);
     return true;
   }
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 803c618..fbb6979 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -83,12 +83,9 @@
         scope_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
-        stack_height_(0),
         context_(NULL),
         bailout_entries_(0),
-        stack_checks_(2),  // There's always at least one.
-        forward_bailout_stack_(NULL),
-        forward_bailout_pending_(NULL) {
+        stack_checks_(2) {  // There's always at least one.
   }
 
   static bool MakeCode(CompilationInfo* info);
@@ -96,6 +93,8 @@
   void Generate(CompilationInfo* info);
   void PopulateDeoptimizationData(Handle<Code> code);
 
+  Handle<FixedArray> handler_table() { return handler_table_; }
+
   class StateField : public BitField<State, 0, 8> { };
   class PcField    : public BitField<unsigned, 8, 32-8> { };
 
@@ -276,27 +275,8 @@
     }
   };
 
-  // The forward bailout stack keeps track of the expressions that can
-  // bail out to just before the control flow is split in a child
-  // node. The stack elements are linked together through the parent
-  // link when visiting expressions in test contexts after requesting
-  // bailout in child forwarding.
-  class ForwardBailoutStack BASE_EMBEDDED {
-   public:
-    ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
-        : expr_(expr), parent_(parent) { }
-
-    Expression* expr() const { return expr_; }
-    ForwardBailoutStack* parent() const { return parent_; }
-
-   private:
-    Expression* const expr_;
-    ForwardBailoutStack* const parent_;
-  };
-
   // Type of a member function that generates inline code for a native function.
-  typedef void (FullCodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
+  typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
 
   static const InlineFunctionGenerator kInlineFunctionGenerators[];
 
@@ -357,23 +337,22 @@
   // need the write barrier if location is CONTEXT.
   MemOperand VarOperand(Variable* var, Register scratch);
 
-  // Forward the bailout responsibility for the given expression to
-  // the next child visited (which must be in a test context).
-  void ForwardBailoutToChild(Expression* expr);
-
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, TOS_REG);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForControl(Expression* expr,
@@ -381,9 +360,14 @@
                        Label* if_false,
                        Label* fall_through) {
     TestContext context(this, expr, if_true, if_false, fall_through);
-    VisitInCurrentContext(expr);
+    Visit(expr);
+    // For test contexts, we prepare for bailout before branching, not at
+    // the end of the entire expression.  This happens as part of visiting
+    // the expression.
   }
 
+  void VisitInDuplicateContext(Expression* expr);
+
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();
@@ -391,29 +375,22 @@
   // Try to perform a comparison as a fast inlined literal compare if
   // the operands allow it.  Returns true if the compare operations
   // has been matched and all code generated; false otherwise.
-  bool TryLiteralCompare(CompareOperation* compare,
-                         Label* if_true,
-                         Label* if_false,
-                         Label* fall_through);
+  bool TryLiteralCompare(CompareOperation* compare);
 
   // Platform-specific code for comparing the type of a value with
   // a given literal string.
   void EmitLiteralCompareTypeof(Expression* expr,
-                                Handle<String> check,
-                                Label* if_true,
-                                Label* if_false,
-                                Label* fall_through);
+                                Expression* sub_expr,
+                                Handle<String> check);
 
-  // Platform-specific code for strict equality comparison with
-  // the undefined value.
-  void EmitLiteralCompareUndefined(Expression* expr,
-                                   Label* if_true,
-                                   Label* if_false,
-                                   Label* fall_through);
+  // Platform-specific code for equality comparison with a nil-like value.
+  void EmitLiteralCompareNil(CompareOperation* expr,
+                             Expression* sub_expr,
+                             NilValue nil);
 
   // Bailout support.
   void PrepareForBailout(Expression* node, State state);
-  void PrepareForBailoutForId(int id, State state);
+  void PrepareForBailoutForId(unsigned id, State state);
 
   // Record a call's return site offset, used to rebuild the frame if the
   // called function was inlined at the site.
@@ -424,7 +401,7 @@
   // canonical JS true value so we will insert a (dead) test against true at
   // the actual bailout target from the optimized code. If not
   // should_normalize, the true and false labels are ignored.
-  void PrepareForBailoutBeforeSplit(State state,
+  void PrepareForBailoutBeforeSplit(Expression* expr,
                                     bool should_normalize,
                                     Label* if_true,
                                     Label* if_false);
@@ -432,7 +409,7 @@
   // Platform-specific code for a variable, constant, or function
   // declaration.  Functions have an initial value.
   void EmitDeclaration(VariableProxy* proxy,
-                       Variable::Mode mode,
+                       VariableMode mode,
                        FunctionLiteral* function,
                        int* global_count);
 
@@ -440,7 +417,7 @@
   // a loop.
   void EmitStackCheck(IterationStatement* stmt);
   // Record the OSR AST id corresponding to a stack check in the code.
-  void RecordStackCheck(int osr_ast_id);
+  void RecordStackCheck(unsigned osr_ast_id);
   // Emit a table of stack check ids and pcs into the code stream.  Return
   // the offset of the start of the table.
   unsigned EmitStackCheckTable();
@@ -459,7 +436,7 @@
   void EmitInlineRuntimeCall(CallRuntime* expr);
 
 #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
-  void Emit##name(ZoneList<Expression*>* arguments);
+  void Emit##name(CallRuntime* expr);
   INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
   INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
 #undef EMIT_INLINE_RUNTIME_CALL
@@ -475,13 +452,8 @@
                                  Label* done);
   void EmitVariableLoad(VariableProxy* proxy);
 
-  enum ResolveEvalFlag {
-    SKIP_CONTEXT_LOOKUP,
-    PERFORM_CONTEXT_LOOKUP
-  };
-
   // Expects the arguments and the function already pushed.
-  void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+  void EmitResolvePossiblyDirectEval(int arg_count);
 
   // Platform-specific support for allocating a new closure based on
   // the given function info.
@@ -548,35 +520,6 @@
     loop_depth_--;
   }
 
-#if defined(V8_TARGET_ARCH_IA32)
-  int stack_height() { return stack_height_; }
-  void set_stack_height(int depth) { stack_height_ = depth; }
-  void increment_stack_height() { stack_height_++; }
-  void increment_stack_height(int delta) { stack_height_ += delta; }
-  void decrement_stack_height() {
-    if (FLAG_verify_stack_height) {
-      ASSERT(stack_height_ > 0);
-    }
-    stack_height_--;
-  }
-  void decrement_stack_height(int delta) {
-    stack_height_-= delta;
-    if (FLAG_verify_stack_height) {
-      ASSERT(stack_height_ >= 0);
-    }
-  }
-  // Call this function only if FLAG_verify_stack_height is true.
-  void verify_stack_height();  // Generates a runtime check of esp - ebp.
-#else
-  int stack_height() { return 0; }
-  void set_stack_height(int depth) {}
-  void increment_stack_height() {}
-  void increment_stack_height(int delta) {}
-  void decrement_stack_height() {}
-  void decrement_stack_height(int delta) {}
-  void verify_stack_height() {}
-#endif  // V8_TARGET_ARCH_IA32
-
   MacroAssembler* masm() { return masm_; }
 
   class ExpressionContext;
@@ -586,9 +529,11 @@
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  bool is_strict_mode() { return function()->strict_mode(); }
-  StrictModeFlag strict_mode_flag() {
-    return is_strict_mode() ? kStrictMode : kNonStrictMode;
+  bool is_classic_mode() {
+    return language_mode() == CLASSIC_MODE;
+  }
+  LanguageMode language_mode() {
+    return function()->language_mode();
   }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return scope_; }
@@ -618,7 +563,6 @@
   void VisitComma(BinaryOperation* expr);
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
-  void VisitInCurrentContext(Expression* expr);
 
   void VisitForTypeofValue(Expression* expr);
 
@@ -637,10 +581,6 @@
 
     virtual ~ExpressionContext() {
       codegen_->set_new_context(old_);
-      if (FLAG_verify_stack_height) {
-        ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
-        codegen()->verify_stack_height();
-      }
     }
 
     Isolate* isolate() const { return codegen_->isolate(); }
@@ -694,7 +634,6 @@
     FullCodeGenerator* codegen() const { return codegen_; }
     MacroAssembler* masm() const { return masm_; }
     MacroAssembler* masm_;
-    int expected_stack_height_;  // The expected stack height esp - ebp on exit.
 
    private:
     const ExpressionContext* old_;
@@ -704,9 +643,7 @@
   class AccumulatorValueContext : public ExpressionContext {
    public:
     explicit AccumulatorValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height();
-    }
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -727,9 +664,7 @@
   class StackValueContext : public ExpressionContext {
    public:
     explicit StackValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height() + 1;
-    }
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -758,9 +693,7 @@
           condition_(condition),
           true_label_(true_label),
           false_label_(false_label),
-          fall_through_(fall_through) {
-      expected_stack_height_ = codegen->stack_height();
-    }
+          fall_through_(fall_through) { }
 
     static const TestContext* cast(const ExpressionContext* context) {
       ASSERT(context->IsTest());
@@ -797,10 +730,7 @@
   class EffectContext : public ExpressionContext {
    public:
     explicit EffectContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) {
-      expected_stack_height_ = codegen->stack_height();
-    }
-
+        : ExpressionContext(codegen) { }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -824,12 +754,10 @@
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  int stack_height_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
-  ForwardBailoutStack* forward_bailout_stack_;
-  ForwardBailoutStack* forward_bailout_pending_;
+  Handle<FixedArray> handler_table_;
 
   friend class NestedStatement;
 
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 68cb053..b386bed 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1115,13 +1115,13 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.NumberOfLocals();
+      int locals = scope_info.LocalCount();
       int current_abbreviation = 4;
 
       for (int param = 0; param < params; ++param) {
         w->WriteULEB128(current_abbreviation++);
         w->WriteString(
-            *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+            *scope_info.ParameterName(param)->ToCString(DISALLOW_NULLS));
         w->Write<uint32_t>(ty_offset);
         Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
         uintptr_t block_start = w->position();
@@ -1312,7 +1312,7 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.NumberOfLocals();
+      int locals = scope_info.LocalCount();
       int total_children =
           params + slots + context_slots + internal_slots + locals + 2;
 
diff --git a/src/globals.h b/src/globals.h
index 6c6966a..30b676c 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -230,6 +230,9 @@
 
 const int kDoubleSizeLog2 = 3;
 
+// Size of the state of the random number generator.
+const int kRandomStateSize = 2 * kIntSize;
+
 #if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
@@ -255,6 +258,10 @@
 const int kBinary32MantissaBits = 23;
 const int kBinary32ExponentShift = 23;
 
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+
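
A quick check of the constant above (illustrative, not part of the patch): 0xfff spans 12 bits, so shifting it left by 51 sets exactly bits 51..62, i.e. 0x7ff8000000000000. On the common IEEE-754 encodings the standard quiet NaN carries exactly these bits:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    int main() {
      const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
      assert(kQuietNaNMask == 0x7ff8000000000000ull);  // bits 51..62 set

      double qnan = std::numeric_limits<double>::quiet_NaN();
      uint64_t bits;
      std::memcpy(&bits, &qnan, sizeof(bits));
      assert((bits & kQuietNaNMask) == kQuietNaNMask);
      return 0;
    }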
 // ASCII/UC16 constants
 // Code-point values in Unicode 4.0 are 21 bits wide.
 typedef uint16_t uc16;
@@ -287,7 +294,7 @@
 // The USE(x) template is used to silence C++ compiler warnings
 // issued for (yet) unused variables (typically parameters).
 template <typename T>
-static inline void USE(T) { }
+inline void USE(T) { }
 
 
 // FUNCTION_ADDR(f) gets the address of a C function f.
@@ -351,6 +358,39 @@
 class FreeStoreAllocationPolicy;
 template <typename T, class P = FreeStoreAllocationPolicy> class List;
 
+// -----------------------------------------------------------------------------
+// Declarations for use in both the preparser and the rest of V8.
+
+// The different language modes that V8 implements. ES5 defines two language
+// modes: an unrestricted mode and a strict mode, indicated by CLASSIC_MODE
+// and STRICT_MODE in the enum below. The harmony spec drafts for the next ES
+// standard specify a third mode, called 'extended mode', which is available
+// only if the harmony flag is set. It is based on strict mode and adds new
+// functionality to it, so most of the semantics of the two modes coincide.
+//
+// In the current draft the term 'base code' is used to refer to code that is
+// neither in strict nor extended mode. However, V8 uses the more distinctive
+// term 'classic mode' instead, to avoid mix-ups.
+
+enum LanguageMode {
+  CLASSIC_MODE,
+  STRICT_MODE,
+  EXTENDED_MODE
+};
+
+
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+//
+// This flag is used in the backend to represent the language mode. So far
+// there is no semantic difference between the strict and the extended mode in
+// the backend, so both modes are represented by the kStrictMode value.
+enum StrictModeFlag {
+  kNonStrictMode,
+  kStrictMode
+};
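
A hypothetical helper (not in this patch) making the collapse described above concrete: the backend folds both strict-based modes into kStrictMode, so only classic mode is distinguished.

    inline StrictModeFlag ToStrictModeFlag(LanguageMode mode) {
      // STRICT_MODE and EXTENDED_MODE are not distinguished in the backend.
      return mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode;
    }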
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_GLOBALS_H_
diff --git a/src/handles.cc b/src/handles.cc
index c482fa6..2ff797d 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -190,7 +190,11 @@
 
   // Inobject slack tracking will reclaim redundant inobject space later,
   // so we can afford to adjust the estimate generously.
-  return estimate + 8;
+  if (FLAG_clever_optimizations) {
+    return estimate + 8;
+  } else {
+    return estimate + 3;
+  }
 }
 
 
@@ -214,10 +218,10 @@
 }
 
 
-Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object) {
+Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object) {
   CALL_HEAP_FUNCTION(object->GetIsolate(),
                      object->NormalizeElements(),
-                     SeededNumberDictionary);
+                     NumberDictionary);
 }
 
 
@@ -229,14 +233,14 @@
 }
 
 
-Handle<SeededNumberDictionary> SeededNumberDictionarySet(
-    Handle<SeededNumberDictionary> dictionary,
+Handle<NumberDictionary> NumberDictionarySet(
+    Handle<NumberDictionary> dictionary,
     uint32_t index,
     Handle<Object> value,
     PropertyDetails details) {
   CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
                      dictionary->Set(index, *value, details),
-                     SeededNumberDictionary);
+                     NumberDictionary);
 }
 
 
@@ -372,24 +376,6 @@
 }
 
 
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
-                           Handle<String> name,
-                           LookupResult* result) {
-  PropertyAttributes attributes;
-  Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(isolate,
-                     obj->GetProperty(*obj, result, *name, &attributes),
-                     Object);
-}
-
-
-Handle<Object> GetElement(Handle<Object> obj,
-                          uint32_t index) {
-  Isolate* isolate = Isolate::Current();
-  CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
-}
-
-
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
@@ -421,17 +407,18 @@
 }
 
 
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   JSObject::HiddenPropertiesFlag flag) {
+Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+                                 Handle<String> key,
+                                 Handle<Object> value) {
   CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->GetHiddenProperties(flag),
+                     obj->SetHiddenProperty(*key, *value),
                      Object);
 }
 
 
-int GetIdentityHash(Handle<JSObject> obj) {
+int GetIdentityHash(Handle<JSReceiver> obj) {
   CALL_AND_RETRY(obj->GetIsolate(),
-                 obj->GetIdentityHash(JSObject::ALLOW_CREATION),
+                 obj->GetIdentityHash(ALLOW_CREATION),
                  return Smi::cast(__object__)->value(),
                  return 0);
 }
@@ -499,6 +486,14 @@
 }
 
 
+Handle<Object> TransitionElementsKind(Handle<JSObject> object,
+                                      ElementsKind to_kind) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->TransitionElementsKind(to_kind),
+                     Object);
+}
+
+
 Handle<JSObject> Copy(Handle<JSObject> obj) {
   Isolate* isolate = obj->GetIsolate();
   CALL_HEAP_FUNCTION(isolate,
@@ -521,8 +516,9 @@
   Handle<Object> cache = Utils::OpenHandle(*handle);
   JSValue* wrapper = JSValue::cast(*cache);
   Foreign* foreign = Script::cast(wrapper->value())->wrapper();
-  ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
-  foreign->set_address(0);
+  ASSERT(foreign->foreign_address() ==
+         reinterpret_cast<Address>(cache.location()));
+  foreign->set_foreign_address(0);
   Isolate* isolate = Isolate::Current();
   isolate->global_handles()->Destroy(cache.location());
   isolate->counters()->script_wrappers()->Decrement();
@@ -530,10 +526,10 @@
 
 
 Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
-  if (script->wrapper()->address() != NULL) {
+  if (script->wrapper()->foreign_address() != NULL) {
     // Return the script wrapper directly from the cache.
     return Handle<JSValue>(
-        reinterpret_cast<JSValue**>(script->wrapper()->address()));
+        reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
   }
   Isolate* isolate = Isolate::Current();
   // Construct a new script wrapper.
@@ -549,7 +545,8 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   isolate->global_handles()->MakeWeak(handle.location(), NULL,
                                       &ClearWrapperCache);
-  script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
+  script->wrapper()->set_foreign_address(
+      reinterpret_cast<Address>(handle.location()));
   return result;
 }
 
@@ -665,6 +662,19 @@
   return right + script->line_offset()->value();
 }
 
+// Convert code position into column number.
+int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
+  int line_number = GetScriptLineNumber(script, code_pos);
+  if (line_number == -1) return -1;
+
+  AssertNoAllocation no_allocation;
+  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+  line_number = line_number - script->line_offset()->value();
+  if (line_number == 0) return code_pos + script->column_offset()->value();
+  int prev_line_end_pos =
+      Smi::cast(line_ends_array->get(line_number - 1))->value();
+  return code_pos - (prev_line_end_pos + 1);
+}
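
A toy version of the column computation above, with the script's line and column offsets omitted (the real code applies both): line_ends[i] holds the source position of the end of line i, so a position on line n > 0 sits at column code_pos - (line_ends[n - 1] + 1).

    #include <cassert>
    #include <vector>

    int ColumnNumber(const std::vector<int>& line_ends, int line, int code_pos) {
      if (line == 0) return code_pos;  // Line 0 starts at position 0.
      return code_pos - (line_ends[line - 1] + 1);
    }

    int main() {
      // Source "ab\ncde": line 0 ends at position 2 (the '\n'),
      // line 1 ends at position 5.
      std::vector<int> line_ends = {2, 5};
      assert(ColumnNumber(line_ends, 0, 1) == 1);  // 'b'
      assert(ColumnNumber(line_ends, 1, 4) == 1);  // 'd'
      return 0;
    }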
 
 int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
   AssertNoAllocation no_allocation;
@@ -696,7 +706,7 @@
 
 
 // Compute the property keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
                                                  Handle<JSObject> object) {
   Isolate* isolate = receiver->GetIsolate();
   Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
@@ -718,7 +728,7 @@
 
 
 // Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
                                                    Handle<JSObject> object) {
   Isolate* isolate = receiver->GetIsolate();
   Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
@@ -749,8 +759,9 @@
 }
 
 
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
-                                          KeyCollectionType type) {
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
+                                          KeyCollectionType type,
+                                          bool* threw) {
   USE(ContainsOnlyValidKeys);
   Isolate* isolate = object->GetIsolate();
   Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
@@ -765,6 +776,16 @@
   for (Handle<Object> p = object;
        *p != isolate->heap()->null_value();
        p = Handle<Object>(p->GetPrototype(), isolate)) {
+    if (p->IsJSProxy()) {
+      Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
+      Handle<Object> args[] = { proxy };
+      Handle<Object> names = Execution::Call(
+          isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+      if (*threw) return content;
+      content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
+      break;
+    }
+
     Handle<JSObject> current(JSObject::cast(*p), isolate);
 
     // Check access rights if required.
@@ -831,11 +852,11 @@
 }
 
 
-Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
+Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
   Isolate* isolate = object->GetIsolate();
   isolate->counters()->for_in()->Increment();
-  Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
-                                                       INCLUDE_PROTOS);
+  Handle<FixedArray> elements =
+      GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
   return isolate->factory()->NewJSArrayWithElements(elements);
 }
 
@@ -885,8 +906,24 @@
 }
 
 
+Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
+                                       Handle<Object> key) {
+  CALL_HEAP_FUNCTION(table->GetIsolate(),
+                     table->Add(*key),
+                     ObjectHashSet);
+}
+
+
+Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
+                                          Handle<Object> key) {
+  CALL_HEAP_FUNCTION(table->GetIsolate(),
+                     table->Remove(*key),
+                     ObjectHashSet);
+}
+
+
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<JSObject> key,
+                                               Handle<Object> key,
                                                Handle<Object> value) {
   CALL_HEAP_FUNCTION(table->GetIsolate(),
                      table->Put(*key, *value),
@@ -894,53 +931,4 @@
 }
 
 
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                    ClearExceptionFlag flag) {
-  return shared->is_compiled() || CompileLazyShared(shared, flag);
-}
-
-
-static bool CompileLazyHelper(CompilationInfo* info,
-                              ClearExceptionFlag flag) {
-  // Compile the source information to a code object.
-  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
-  ASSERT(!info->isolate()->has_pending_exception());
-  bool result = Compiler::CompileLazy(info);
-  ASSERT(result != Isolate::Current()->has_pending_exception());
-  if (!result && flag == CLEAR_EXCEPTION) {
-    info->isolate()->clear_pending_exception();
-  }
-  return result;
-}
-
-
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
-                       ClearExceptionFlag flag) {
-  CompilationInfo info(shared);
-  return CompileLazyHelper(&info, flag);
-}
-
-
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
-  bool result = true;
-  if (function->shared()->is_compiled()) {
-    function->ReplaceCode(function->shared()->code());
-    function->shared()->set_code_age(0);
-  } else {
-    CompilationInfo info(function);
-    result = CompileLazyHelper(&info, flag);
-    ASSERT(!result || function->is_compiled());
-  }
-  return result;
-}
-
-
-bool CompileOptimized(Handle<JSFunction> function,
-                      int osr_ast_id,
-                      ClearExceptionFlag flag) {
-  CompilationInfo info(function);
-  info.SetOptimizing(osr_ast_id);
-  return CompileLazyHelper(&info, flag);
-}
-
 } }  // namespace v8::internal
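Reviewer note: the new ObjectHashSetAdd/ObjectHashSetRemove wrappers back the harmony collections (src/collection.js in the build change), and the underlying table hashes on object identity rather than structural equality — hence the widening of keys from Handle<JSObject> to Handle<Object>. A toy illustration of identity-keyed membership, outside V8:

    #include <cassert>
    #include <unordered_set>

    struct HeapObj { int payload; };

    int main() {
      std::unordered_set<const HeapObj*> set;  // pointer identity is the key
      HeapObj a = {1};
      HeapObj b = {1};                         // equal payload, distinct object
      set.insert(&a);                          // like ObjectHashSetAdd
      assert(set.count(&a) == 1);              // found: same identity
      assert(set.count(&b) == 0);              // not found: different identity
      set.erase(&a);                           // like ObjectHashSetRemove
      assert(set.empty());
      return 0;
    }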
diff --git a/src/handles.h b/src/handles.h
index 5674120..cfa65b3 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -170,11 +170,11 @@
 void NormalizeProperties(Handle<JSObject> object,
                          PropertyNormalizationMode mode,
                          int expected_additional_properties);
-Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object);
+Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
 void TransformToFastProperties(Handle<JSObject> object,
                                int unused_property_fields);
-MUST_USE_RESULT Handle<SeededNumberDictionary> SeededNumberDictionarySet(
-    Handle<SeededNumberDictionary> dictionary,
+MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(
+    Handle<NumberDictionary> dictionary,
     uint32_t index,
     Handle<Object> value,
     PropertyDetails details);
@@ -224,12 +224,6 @@
                              Handle<Object> value,
                              PropertyAttributes attributes = NONE);
 
-Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
-                                          Handle<String> key,
-                                          Handle<Object> value,
-                                          PropertyAttributes attributes,
-                                          StrictModeFlag strict_mode);
-
 MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
                                           uint32_t index,
                                           Handle<Object> value,
@@ -240,20 +234,15 @@
                              Handle<Object> value,
                              StrictModeFlag strict_mode);
 
+Handle<Object> TransitionElementsKind(Handle<JSObject> object,
+                                      ElementsKind to_kind);
+
 Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name);
 
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
-                           Handle<String> name,
-                           LookupResult* result);
-
-
-Handle<Object> GetElement(Handle<Object> obj,
-                          uint32_t index);
-
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
@@ -263,14 +252,13 @@
 
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
 
-// Return the object's hidden properties object. If the object has no hidden
-// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
-// hidden property object will be allocated. Otherwise Heap::undefined_value
-// is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   JSObject::HiddenPropertiesFlag flag);
+// Sets a hidden property on an object. Returns obj on success, undefined
+// if trying to set the property on a detached proxy.
+Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+                                 Handle<String> key,
+                                 Handle<Object> value);
 
-int GetIdentityHash(Handle<JSObject> obj);
+int GetIdentityHash(Handle<JSReceiver> obj);
 
 Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
 Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@@ -298,21 +286,23 @@
 int GetScriptLineNumber(Handle<Script> script, int code_position);
 // The safe version does not make heap allocations but may work much slower.
 int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
+int GetScriptColumnNumber(Handle<Script> script, int code_position);
 
 // Computes the enumerable keys from interceptors. Used for debug mirrors and
 // by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
                                                  Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
                                                    Handle<JSObject> object);
 
 enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
 
 // Computes the enumerable keys for a JSObject. Used for implementing
 // "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
-                                          KeyCollectionType type);
-Handle<JSArray> GetKeysFor(Handle<JSObject> object);
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
+                                          KeyCollectionType type,
+                                          bool* threw);
+Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
 Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                                        bool cache_result);
 
@@ -347,26 +337,16 @@
 
 Handle<Object> PreventExtensions(Handle<JSObject> object);
 
+Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
+                                       Handle<Object> key);
+
+Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
+                                          Handle<Object> key);
+
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<JSObject> key,
+                                               Handle<Object> key,
                                                Handle<Object> value);
 
-// Does lazy compilation of the given function. Returns true on success and
-// false if the compilation resulted in a stack overflow.
-enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
-
-bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                    ClearExceptionFlag flag);
-
-bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
-                       ClearExceptionFlag flag);
-
-bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
-
-bool CompileOptimized(Handle<JSFunction> function,
-                      int osr_ast_id,
-                      ClearExceptionFlag flag);
-
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
diff --git a/src/hashmap.cc b/src/hashmap.cc
index 1422afd..0b404a9 100644
--- a/src/hashmap.cc
+++ b/src/hashmap.cc
@@ -36,13 +36,7 @@
 namespace v8 {
 namespace internal {
 
-Allocator HashMap::DefaultAllocator;
-
-
-HashMap::HashMap() {
-  allocator_ = NULL;
-  match_ = NULL;
-}
+Allocator* HashMap::DefaultAllocator = ::new Allocator();
 
 
 HashMap::HashMap(MatchFun match,
diff --git a/src/hashmap.h b/src/hashmap.h
index 5c13212..d2d1faf 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -46,19 +46,14 @@
 
 class HashMap {
  public:
-  static Allocator DefaultAllocator;
+  static Allocator* DefaultAllocator;
 
   typedef bool (*MatchFun) (void* key1, void* key2);
 
-  // Dummy constructor.  This constructor doesn't set up the hash
-  // map properly so don't use it unless you have good reason (e.g.,
-  // you know that the HashMap will never be used).
-  HashMap();
-
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
   explicit HashMap(MatchFun match,
-                   Allocator* allocator = &DefaultAllocator,
+                   Allocator* allocator = DefaultAllocator,
                    uint32_t initial_capacity = 8);
 
   ~HashMap();
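Reviewer note: the DefaultAllocator change above swaps a static object for a pointer to a heap allocation that is deliberately never freed, so no exit-time destructor can run while other threads may still be using the allocator. A minimal sketch of the idiom (my naming, not V8's):

    struct Allocator {
      // ... allocation state ...
    };

    // Before: a static object whose destructor runs at process exit, in an
    // unspecified order relative to other static destructors.
    //   static Allocator default_allocator;

    // After: intentionally leaked, so it stays valid for the whole process
    // lifetime and no destructor ever runs.
    static Allocator* const default_allocator = new Allocator();

    int main() {
      (void)default_allocator;  // used for the process lifetime; never deleted
      return 0;
    }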
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 7b666af..6ff350a 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -33,20 +33,51 @@
 #include "list-inl.h"
 #include "objects.h"
 #include "v8-counters.h"
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
 
 namespace v8 {
 namespace internal {
 
 void PromotionQueue::insert(HeapObject* target, int size) {
+  if (emergency_stack_ != NULL) {
+    emergency_stack_->Add(Entry(target, size));
+    return;
+  }
+
+  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
+    NewSpacePage* rear_page =
+        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
+    ASSERT(!rear_page->prev_page()->is_anchor());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
+    ActivateGuardIfOnTheSamePage();
+  }
+
+  if (guard_) {
+    ASSERT(GetHeadPage() ==
+           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
+
+    if ((rear_ - 2) < limit_) {
+      RelocateQueueHead();
+      emergency_stack_->Add(Entry(target, size));
+      return;
+    }
+  }
+
   *(--rear_) = reinterpret_cast<intptr_t>(target);
   *(--rear_) = size;
   // Assert no overflow into live objects.
-  ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
+#ifdef DEBUG
+  SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+                              reinterpret_cast<Address>(rear_));
+#endif
 }
 
 
-int Heap::MaxObjectSizeInPagedSpace() {
-  return Page::kMaxHeapObjectSize;
+void PromotionQueue::ActivateGuardIfOnTheSamePage() {
+  guard_ = guard_ ||
+      heap_->new_space()->active_space()->current_page()->address() ==
+      GetHeadPage()->address();
 }
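Reviewer note: the insert/guard logic above keeps the promotion queue at the unused end of to-space, growing downward, and spills entries onto an emergency stack when the queue would collide with the allocation front. A toy model of that shape, with invented names and none of V8's page handling:

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    class DownwardQueue {
     public:
      DownwardQueue(intptr_t* buffer, size_t words)
          : limit_(buffer), rear_(buffer + words) {}

      void Insert(intptr_t target, intptr_t size) {
        // Once spilling has started, or when the next entry would cross the
        // allocation limit, push onto the overflow list instead.
        if (!overflow_.empty() || rear_ - 2 < limit_) {
          overflow_.push_back(std::make_pair(target, size));
          return;
        }
        *(--rear_) = target;
        *(--rear_) = size;
      }

      // Raise the floor as allocation consumes the front of the buffer,
      // mirroring the queue's limit tracking new_space()->top().
      void SetNewLimit(intptr_t* new_limit) { limit_ = new_limit; }

     private:
      intptr_t* limit_;  // may move up toward rear_
      intptr_t* rear_;   // grows downward
      std::vector<std::pair<intptr_t, intptr_t> > overflow_;
    };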
 
 
@@ -83,8 +114,8 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -116,8 +147,8 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -181,7 +212,7 @@
   } else if (CODE_SPACE == space) {
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
-    result = lo_space_->AllocateRaw(size_in_bytes);
+    result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
   } else if (CELL_SPACE == space) {
     result = cell_space_->AllocateRaw(size_in_bytes);
   } else {
@@ -193,19 +224,21 @@
 }
 
 
-MaybeObject* Heap::NumberFromInt32(int32_t value) {
+MaybeObject* Heap::NumberFromInt32(
+    int32_t value, PretenureFlag pretenure) {
   if (Smi::IsValid(value)) return Smi::FromInt(value);
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastI2D(value));
+  return AllocateHeapNumber(FastI2D(value), pretenure);
 }
 
 
-MaybeObject* Heap::NumberFromUint32(uint32_t value) {
+MaybeObject* Heap::NumberFromUint32(
+    uint32_t value, PretenureFlag pretenure) {
   if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
     return Smi::FromInt((int32_t)value);
   }
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastUI2D(value));
+  return AllocateHeapNumber(FastUI2D(value), pretenure);
 }
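Reviewer note: NumberFromInt32 above has a fast path for small integers (Smis), which are encoded in the tagged word itself and need no allocation; only values outside the Smi range allocate a heap number, which is where the new pretenure flag matters. A sketch of the range check under the assumption of 32-bit tagged values with a one-bit tag (the constant is mine):

    #include <cstdint>

    // 31 payload bits remain after the one-bit Smi tag (assumed layout).
    constexpr bool SmiIsValid(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);
    }

    static_assert(SmiIsValid(0), "zero is a Smi");
    static_assert(SmiIsValid((1 << 30) - 1), "largest Smi");
    static_assert(!SmiIsValid(1 << 30), "one past the range: heap number");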
 
 
@@ -220,10 +253,8 @@
   // Dispose of the C++ object if it has not already been disposed.
   if (*resource_addr != NULL) {
     (*resource_addr)->Dispose();
+    *resource_addr = NULL;
   }
-
-  // Clear the resource pointer in the string.
-  *resource_addr = NULL;
 }
 
 
@@ -265,6 +296,11 @@
 }
 
 
+bool Heap::InNewSpace(Address addr) {
+  return new_space_.Contains(addr);
+}
+
+
 bool Heap::InFromSpace(Object* object) {
   return new_space_.FromSpaceContains(object);
 }
@@ -275,29 +311,36 @@
 }
 
 
+bool Heap::OldGenerationAllocationLimitReached() {
+  if (!incremental_marking()->IsStopped()) return false;
+  return OldGenerationSpaceAvailable() < 0;
+}
+
+
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   // An object should be promoted if:
   // - the object has survived a scavenge operation or
   // - to space is already 25% full.
-  return old_address < new_space_.age_mark()
-      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
+  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Address age_mark = new_space_.age_mark();
+  bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+      (!page->ContainsLimit(age_mark) || old_address < age_mark);
+  return below_mark || (new_space_.Size() + object_size) >=
+                        (new_space_.EffectiveCapacity() >> 2);
 }
 
 
 void Heap::RecordWrite(Address address, int offset) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
-  SLOW_ASSERT(Contains(address + offset));
-  Page::FromAddress(address)->MarkRegionDirty(address + offset);
+  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
 }
 
 
 void Heap::RecordWrites(Address address, int start, int len) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
-  Page* page = Page::FromAddress(address);
-  page->SetRegionMarks(page->GetRegionMarks() |
-      page->GetRegionMaskForSpan(address + start, len * kPointerSize));
+  if (!InNewSpace(address)) {
+    for (int i = 0; i < len; i++) {
+      store_buffer_.Mark(address + start + i * kPointerSize);
+    }
+  }
 }
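Reviewer note: RecordWrite/RecordWrites above are the store-buffer form of the write barrier replacing the dirty-region marking. Instead of setting region bits on the page, each interesting slot address is appended to a buffer, and stores into new-space objects are skipped entirely. A toy version with invented types:

    #include <vector>

    typedef char* Address;

    struct ToyHeap {
      Address new_space_start;
      Address new_space_end;
      std::vector<Address> store_buffer;  // slots that may point to new space

      bool InNewSpace(Address a) const {
        return a >= new_space_start && a < new_space_end;
      }

      // Called after "object->field(offset) = value": remembers the slot so
      // a later scavenge can find old-to-new pointers without scanning all
      // of old space.
      void RecordWrite(Address object, int offset) {
        if (!InNewSpace(object)) store_buffer.push_back(object + offset);
      }
    };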
 
 
@@ -336,38 +379,12 @@
 
 
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
   CopyWords(reinterpret_cast<Object**>(dst),
             reinterpret_cast<Object**>(src),
             byte_size / kPointerSize);
 }
 
 
-void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                   Address src,
-                                                   int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-
-  Page* page = Page::FromAddress(dst);
-  uint32_t marks = page->GetRegionMarks();
-
-  for (int remaining = byte_size / kPointerSize;
-       remaining > 0;
-       remaining--) {
-    Memory::Object_at(dst) = Memory::Object_at(src);
-
-    if (InNewSpace(Memory::Object_at(dst))) {
-      marks |= page->GetRegionMaskForAddress(dst);
-    }
-
-    dst += kPointerSize;
-    src += kPointerSize;
-  }
-
-  page->SetRegionMarks(marks);
-}
-
-
 void Heap::MoveBlock(Address dst, Address src, int byte_size) {
   ASSERT(IsAligned(byte_size, kPointerSize));
 
@@ -387,16 +404,6 @@
 }
 
 
-void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                   Address src,
-                                                   int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-  ASSERT((dst < src) || (dst >= (src + byte_size)));
-
-  CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
-}
-
-
 void Heap::ScavengePointer(HeapObject** p) {
   ScavengeObject(p, *p);
 }
@@ -414,7 +421,9 @@
   // If the first word is a forwarding address, the object has already been
   // copied.
   if (first_word.IsForwardingAddress()) {
-    *p = first_word.ToForwardingAddress();
+    HeapObject* dest = first_word.ToForwardingAddress();
+    ASSERT(HEAP->InFromSpace(*p));
+    *p = dest;
     return;
   }
 
@@ -459,7 +468,7 @@
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
     }
   } else {
     // Avoid underflow.
@@ -476,6 +485,7 @@
   roots_[kLastScriptIdRootIndex] = last_script_id;
 }
 
+
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -581,11 +591,11 @@
 #ifdef DEBUG
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     ASSERT(heap_->InNewSpace(new_space_strings_[i]));
-    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
+    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
-    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
+    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
 #endif
 }
@@ -600,7 +610,9 @@
 
 void ExternalStringTable::ShrinkNewStrings(int position) {
   new_space_strings_.Rewind(position);
-  Verify();
+  if (FLAG_verify_heap) {
+    Verify();
+  }
 }
 
 
@@ -688,15 +700,6 @@
 }
 
 
-void MarkCompactCollector::SetMark(HeapObject* obj) {
-  tracer_->increment_marked_count();
-#ifdef DEBUG
-  UpdateLiveObjectCount(obj);
-#endif
-  obj->SetMark();
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_INL_H_
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 7e613e9..46c63c2 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -114,7 +114,6 @@
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
-      HEAP->CollectAllGarbage(true);
       HeapSnapshotGenerator generator(result, control);
       generation_completed = generator.GenerateSnapshot();
       break;
diff --git a/src/heap.cc b/src/heap.cc
index c91f769..9bb4e40 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -36,13 +36,16 @@
 #include "deoptimizer.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
+#include "store-buffer.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -58,10 +61,6 @@
 namespace internal {
 
 
-static const intptr_t kMinimumPromotionLimit = 2 * MB;
-static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-
 static Mutex* gc_initializer_mutex = OS::CreateMutex();
 
 
@@ -70,27 +69,21 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-      reserved_semispace_size_(2*MB),
-      max_semispace_size_(2*MB),
-      initial_semispace_size_(128*KB),
-      max_old_generation_size_(192*MB),
-      max_executable_size_(max_old_generation_size_),
+#define LUMP_OF_MEMORY (128 * KB)
       code_range_size_(0),
 #elif defined(V8_TARGET_ARCH_X64)
-      reserved_semispace_size_(16*MB),
-      max_semispace_size_(16*MB),
-      initial_semispace_size_(1*MB),
-      max_old_generation_size_(1400*MB),
-      max_executable_size_(256*MB),
+#define LUMP_OF_MEMORY (2 * MB)
       code_range_size_(512*MB),
 #else
-      reserved_semispace_size_(8*MB),
-      max_semispace_size_(8*MB),
-      initial_semispace_size_(512*KB),
-      max_old_generation_size_(700*MB),
-      max_executable_size_(128*MB),
+#define LUMP_OF_MEMORY MB
       code_range_size_(0),
 #endif
+      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
+      max_executable_size_(128l * LUMP_OF_MEMORY),
+
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
 // Will be 4 * reserved_semispace_size_ to ensure that young
@@ -100,6 +93,7 @@
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
+      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
@@ -109,7 +103,6 @@
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
-      mc_count_(0),
       ms_count_(0),
       gc_count_(0),
       unflattened_strings_length_(0),
@@ -119,12 +112,16 @@
       disallow_allocation_failure_(false),
       debug_utils_(NULL),
 #endif  // DEBUG
+      new_space_high_promotion_mode_active_(false),
       old_gen_promotion_limit_(kMinimumPromotionLimit),
       old_gen_allocation_limit_(kMinimumAllocationLimit),
+      old_gen_limit_factor_(1),
+      size_of_old_gen_at_last_old_space_gc_(0),
       external_allocation_limit_(0),
       amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       old_gen_exhausted_(false),
+      store_buffer_rebuilder_(store_buffer()),
       hidden_symbol_(NULL),
       global_gc_prologue_callback_(NULL),
       global_gc_epilogue_callback_(NULL),
@@ -141,12 +138,15 @@
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
-      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+      store_buffer_(this),
+      marking_(this),
+      incremental_marking_(this),
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
+      promotion_queue_(this),
       configured_(false),
-      is_safe_to_read_maps_(true) {
+      chunks_queued_for_free_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
@@ -224,29 +224,10 @@
 
 
 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return object->SizeFromMap(map_word.ToMap());
-}
-
-
-int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  uint32_t marker = Memory::uint32_at(object->address());
-  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
-    return kIntSize;
-  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
-    return Memory::int_at(object->address() + kIntSize);
-  } else {
-    MapWord map_word = object->map_word();
-    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
-    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
-    return object->SizeFromMap(map);
+  if (IntrusiveMarking::IsMarked(object)) {
+    return IntrusiveMarking::SizeOfMarkedObject(object);
   }
+  return object->SizeFromMap(object->map());
 }
 
 
@@ -400,6 +381,7 @@
 #endif  // DEBUG
 
   LiveObjectList::GCPrologue();
+  store_buffer()->GCPrologue();
 }
 
 intptr_t Heap::SizeOfObjects() {
@@ -412,6 +394,7 @@
 }
 
 void Heap::GarbageCollectionEpilogue() {
+  store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
 #ifdef DEBUG
   allow_allocation(true);
@@ -443,13 +426,13 @@
 }
 
 
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(int flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector_.SetForceCompaction(force_compaction);
+  mark_compact_collector_.SetFlags(flags);
   CollectGarbage(OLD_POINTER_SPACE);
-  mark_compact_collector_.SetForceCompaction(false);
+  mark_compact_collector_.SetFlags(kNoGCFlags);
 }
 
 
@@ -457,8 +440,6 @@
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector()->SetForceCompaction(true);
-
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC.  Therefore if we collect aggressively and weak handle callback
@@ -467,13 +448,17 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+  isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
       break;
     }
   }
-  mark_compact_collector()->SetForceCompaction(false);
+  mark_compact_collector()->SetFlags(kNoGCFlags);
+  new_space_.Shrink();
+  incremental_marking()->UncommitMarkingDeque();
 }
 
 
@@ -490,6 +475,23 @@
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
+  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+    }
+  }
+
+  if (collector == MARK_COMPACTOR &&
+      !mark_compact_collector()->PreciseSweepingRequired() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      FLAG_incremental_marking_steps) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+    }
+    collector = SCAVENGER;
+  }
+
   bool next_gc_likely_to_collect_more = false;
 
   { GCTracer tracer(this);
@@ -512,13 +514,24 @@
     GarbageCollectionEpilogue();
   }
 
+  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (incremental_marking()->IsStopped()) {
+    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+      incremental_marking()->Start();
+    }
+  }
+
   return next_gc_likely_to_collect_more;
 }
 
 
 void Heap::PerformScavenge() {
   GCTracer tracer(this);
-  PerformGarbageCollection(SCAVENGER, &tracer);
+  if (incremental_marking()->IsStopped()) {
+    PerformGarbageCollection(SCAVENGER, &tracer);
+  } else {
+    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
+  }
 }
 
 
@@ -531,7 +544,7 @@
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
         // Check that the symbol is actually a symbol.
-        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
+        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
       }
     }
   }
@@ -563,7 +576,9 @@
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
   bool gc_performed = true;
-  while (gc_performed) {
+  int counter = 0;
+  static const int kThreshold = 20;
+  while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
       Heap::CollectGarbage(NEW_SPACE);
@@ -602,6 +617,11 @@
       gc_performed = true;
     }
   }
+
+  if (gc_performed) {
+    // Failed to reserve the space after several attempts.
+    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
+  }
 }
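Reviewer note: ReserveSpace now gives up after a fixed number of GC-and-retry rounds instead of looping forever when space cannot be reserved. The control-flow shape, as a standalone sketch with a stubbed-out reservation step:

    #include <cstdio>
    #include <cstdlib>

    // Stub standing in for "reserve every space, collecting on failure";
    // succeeds on the third round here purely for illustration.
    static int rounds_until_success = 3;
    static bool TryReserveAll() { return --rounds_until_success <= 0; }

    static void ReserveOrDie() {
      static const int kThreshold = 20;  // same bound as the hunk above
      for (int attempt = 0; attempt < kThreshold; ++attempt) {
        if (TryReserveAll()) return;     // everything fit; done
      }
      // Failed to reserve the space after several attempts.
      std::fprintf(stderr, "ReserveSpace: out of memory\n");
      std::abort();                      // stands in for FatalProcessOutOfMemory
    }

    int main() { ReserveOrDie(); return 0; }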
 
 
@@ -610,13 +630,6 @@
 
   // Committing memory to from space failed.
   // Try shrinking and try again.
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->RelinkPageListInChunkOrder(true);
-  }
-
   Shrink();
   if (new_space_.CommitFromSpaceIfNeeded()) return;
 
@@ -631,13 +644,17 @@
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
-    // Get the caches for this context:
-    FixedArray* caches =
-      Context::cast(context)->jsfunction_result_caches();
-    // Clear the caches:
-    int length = caches->length();
-    for (int i = 0; i < length; i++) {
-      JSFunctionResultCache::cast(caches->get(i))->Clear();
+    // Get the caches for this context. GC can happen when the context
+    // is not fully initialized, so the caches can be undefined.
+    Object* caches_or_undefined =
+        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
+    if (!caches_or_undefined->IsUndefined()) {
+      FixedArray* caches = FixedArray::cast(caches_or_undefined);
+      // Clear the caches:
+      int length = caches->length();
+      for (int i = 0; i < length; i++) {
+        JSFunctionResultCache::cast(caches->get(i))->Clear();
+      }
     }
     // Get the next context:
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -647,34 +664,25 @@
 
 
 void Heap::ClearNormalizedMapCaches() {
-  if (isolate_->bootstrapper()->IsActive()) return;
+  if (isolate_->bootstrapper()->IsActive() &&
+      !incremental_marking()->IsMarking()) {
+    return;
+  }
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
-    Context::cast(context)->normalized_map_cache()->Clear();
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    Object* cache =
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+    if (!cache->IsUndefined()) {
+      NormalizedMapCache::cast(cache)->Clear();
+    }
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
 
 
-#ifdef DEBUG
-
-enum PageWatermarkValidity {
-  ALL_VALID,
-  ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
-                                        PageWatermarkValidity validity) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  bool expected_value = (validity == ALL_VALID);
-  while (it.has_next()) {
-    Page* page = it.next();
-    ASSERT(page->IsWatermarkValid() == expected_value);
-  }
-}
-#endif
-
 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
   double survival_rate =
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -707,7 +715,9 @@
     PROFILE(isolate_, CodeMovingGCEvent());
   }
 
-  VerifySymbolTable();
+  if (FLAG_verify_heap) {
+    VerifySymbolTable();
+  }
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -727,6 +737,13 @@
 
   int start_new_space_size = Heap::new_space()->SizeAsInt();
 
+  if (IsHighSurvivalRate()) {
+    // We speed up the incremental marker if it is running so that it
+    // does not fall behind the rate of promotion, which would cause a
+    // constantly growing old space.
+    incremental_marking()->NotifyOfHighPromotionRate();
+  }
+
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
@@ -736,11 +753,33 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    intptr_t old_gen_size = PromotedSpaceSize();
-    old_gen_promotion_limit_ =
-        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
-    old_gen_allocation_limit_ =
-        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+    if (!new_space_high_promotion_mode_active_ &&
+        new_space_.Capacity() == new_space_.MaximumCapacity() &&
+        IsStableOrIncreasingSurvivalTrend() &&
+        IsHighSurvivalRate()) {
+      // Stable high survival rates even though the young generation is at
+      // maximum capacity indicate that most objects will be promoted.
+      // To decrease scavenger pauses and final mark-sweep pauses, we
+      // have to limit maximal capacity of the young generation.
+      new_space_high_promotion_mode_active_ = true;
+      if (FLAG_trace_gc) {
+        PrintF("Limited new space size due to high promotion rate: %d MB\n",
+               new_space_.InitialCapacity() / MB);
+      }
+    } else if (new_space_high_promotion_mode_active_ &&
+        IsDecreasingSurvivalTrend() &&
+        !IsHighSurvivalRate()) {
+      // Decreasing low survival rates might indicate that the above high
+      // promotion mode is over and we should allow the young generation
+      // to grow again.
+      new_space_high_promotion_mode_active_ = false;
+      if (FLAG_trace_gc) {
+        PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
+               new_space_.MaximumCapacity() / MB);
+      }
+    }
+
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -750,10 +789,16 @@
       // In this case we aggressively raise old generation memory limits to
       // postpone subsequent mark-sweep collection and thus trade memory
       // space for the mutation speed.
-      old_gen_promotion_limit_ *= 2;
-      old_gen_allocation_limit_ *= 2;
+      old_gen_limit_factor_ = 2;
+    } else {
+      old_gen_limit_factor_ = 1;
     }
 
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -763,6 +808,11 @@
     UpdateSurvivalRateTrend(start_new_space_size);
   }
 
+  if (new_space_high_promotion_mode_active_ &&
+      new_space_.Capacity() > new_space_.InitialCapacity()) {
+    new_space_.Shrink();
+  }
+
   isolate_->counters()->objs_since_last_young()->Set(0);
 
   gc_post_processing_depth_++;
@@ -782,9 +832,7 @@
         amount_of_external_allocated_memory_;
   }
 
-  GCCallbackFlags callback_flags = tracer->is_compacting()
-      ? kGCCallbackFlagCompacted
-      : kNoGCCallbackFlags;
+  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -796,7 +844,9 @@
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_epilogue_callback_();
   }
-  VerifySymbolTable();
+  if (FLAG_verify_heap) {
+    VerifySymbolTable();
+  }
 
   return next_gc_likely_to_collect_more;
 }
@@ -808,34 +858,24 @@
 
   mark_compact_collector_.Prepare(tracer);
 
-  bool is_compacting = mark_compact_collector_.IsCompacting();
+  ms_count_++;
+  tracer->set_full_gc_count(ms_count_);
 
-  if (is_compacting) {
-    mc_count_++;
-  } else {
-    ms_count_++;
-  }
-  tracer->set_full_gc_count(mc_count_ + ms_count_);
+  MarkCompactPrologue();
 
-  MarkCompactPrologue(is_compacting);
-
-  is_safe_to_read_maps_ = false;
   mark_compact_collector_.CollectGarbage();
-  is_safe_to_read_maps_ = true;
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
   gc_state_ = NOT_IN_GC;
 
-  Shrink();
-
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   contexts_disposed_ = 0;
 }
 
 
-void Heap::MarkCompactPrologue(bool is_compacting) {
+void Heap::MarkCompactPrologue() {
   // At any old GC clear the keyed lookup cache to enable collection of unused
   // maps.
   isolate_->keyed_lookup_cache()->Clear();
@@ -847,7 +887,8 @@
 
   CompletelyClearInstanceofCache();
 
-  if (is_compacting) FlushNumberStringCache();
+  // TODO(1605) select heuristic for flushing NumberString cache with
+  // FlushNumberStringCache
   if (FLAG_cleanup_code_caches_at_gc) {
     polymorphic_code_cache()->set_cache(undefined_value());
   }
@@ -857,13 +898,8 @@
 
 
 Object* Heap::FindCodeObject(Address a) {
-  Object* obj = NULL;  // Initialization to please compiler.
-  { MaybeObject* maybe_obj = code_space_->FindObject(a);
-    if (!maybe_obj->ToObject(&obj)) {
-      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
-    }
-  }
-  return obj;
+  return isolate()->inner_pointer_to_code_cache()->
+      GcSafeFindCodeForInnerPointer(a);
 }
 
 
@@ -911,23 +947,29 @@
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v;
   HeapObjectIterator code_it(HEAP->code_space());
-  for (HeapObject* object = code_it.next();
-       object != NULL; object = code_it.next())
+  for (HeapObject* object = code_it.Next();
+       object != NULL; object = code_it.Next())
     object->Iterate(&v);
 
-  HeapObjectIterator data_it(HEAP->old_data_space());
-  for (HeapObject* object = data_it.next();
-       object != NULL; object = data_it.next())
-    object->Iterate(&v);
+  // The old data space is normally swept conservatively, in which case the
+  // iterator does not work, so we normally skip the next bit.
+  if (!HEAP->old_data_space()->was_swept_conservatively()) {
+    HeapObjectIterator data_it(HEAP->old_data_space());
+    for (HeapObject* object = data_it.Next();
+         object != NULL; object = data_it.Next())
+      object->Iterate(&v);
+  }
 }
 #endif
 
 
 void Heap::CheckNewSpaceExpansionCriteria() {
   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
-      survived_since_last_expansion_ > new_space_.Capacity()) {
-    // Grow the size of new space if there is room to grow and enough
-    // data has survived scavenge since the last expansion.
+      survived_since_last_expansion_ > new_space_.Capacity() &&
+      !new_space_high_promotion_mode_active_) {
+    // Grow the size of new space if there is room to grow, enough data
+    // has survived scavenge since the last expansion and we are not in
+    // high promotion mode.
     new_space_.Grow();
     survived_since_last_expansion_ = 0;
   }
@@ -940,29 +982,107 @@
 }
 
 
+void Heap::ScavengeStoreBufferCallback(
+    Heap* heap,
+    MemoryChunk* page,
+    StoreBufferEvent event) {
+  heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // Has this page already overflowed the store buffer during this
+      // iteration?
+      if (current_page_->scan_on_scavenge()) {
+        // If so, wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic is
+        // that no page can take more than 1/5 of the remaining slots in the store
+        // buffer.
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        ASSERT(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store Buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->HandleFullness();
+    } else {
+      // Store Buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      ASSERT(current_page_ == page);
+      ASSERT(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      ASSERT(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
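Reviewer note: to make the "no page can take more than 1/5 of the remaining slots" comment concrete — the code compares the slots a page consumed against a quarter of what is left after scanning it, and the two formulations are equivalent. A worked check with made-up numbers:

    #include <cassert>

    int main() {
      const long before = 1000;           // slots free before scanning the page
      const long taken = 200;             // Top() - start_of_current_page_
      const long after = before - taken;  // Limit() - Top() afterwards: 800

      // The callback's condition: the page overflows its budget when the
      // entries it added reach a quarter of the space still remaining.
      assert(taken >= (after >> 2));      // 200 >= 200: page gets marked

      // Equivalent to the 1/5-of-remaining rule from the comment:
      // taken >= (before - taken) / 4  <=>  5 * taken >= before.
      assert(5 * taken >= before);        // 1000 >= 1000
      return 0;
    }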
+
+
+void PromotionQueue::Initialize() {
+  // Assumes that a NewSpacePage exactly fits a whole number of promotion
+  // queue entries (where each is a pair of intptr_t). This allows us to
+  // simplify the test for when to switch pages.
+  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+         == 0);
+  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+  front_ = rear_ =
+      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  emergency_stack_ = NULL;
+  guard_ = false;
+}
+
+
+void PromotionQueue::RelocateQueueHead() {
+  ASSERT(emergency_stack_ == NULL);
+
+  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  intptr_t* head_start = rear_;
+  intptr_t* head_end =
+      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+
+  int entries_count =
+      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+  emergency_stack_ = new List<Entry>(2 * entries_count);
+
+  while (head_start != head_end) {
+    int size = static_cast<int>(*(head_start++));
+    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    emergency_stack_->Add(Entry(obj, size));
+  }
+  rear_ = head_end;
+}
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
+  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
 
-  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
-  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
-#ifdef DEBUG
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
-#endif
-
-  // We do not update an allocation watermark of the top page during linear
-  // allocation to avoid overhead. So to maintain the watermark invariant
-  // we have to manually cache the watermark and mark the top page as having an
-  // invalid watermark. This guarantees that dirty regions iteration will use a
-  // correct watermark even if a linear allocation happens.
-  old_pointer_space_->FlushTopPageWatermark();
-  map_space_->FlushTopPageWatermark();
-
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
@@ -974,6 +1094,13 @@
 
   CheckNewSpaceExpansionCriteria();
 
+  SelectScavengingVisitorsTable();
+
+  incremental_marking()->PrepareForScavenge();
+
+  old_pointer_space()->AdvanceSweeper(new_space_.Size());
+  old_data_space()->AdvanceSweeper(new_space_.Size());
+
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
@@ -996,32 +1123,29 @@
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceLow();
-  promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+  Address new_space_front = new_space_.ToSpaceStart();
+  promotion_queue_.Initialize();
 
-  is_safe_to_read_maps_ = false;
+#ifdef DEBUG
+  store_buffer()->Clean();
+#endif
+
   ScavengeVisitor scavenge_visitor(this);
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
-  // Copy objects reachable from the old generation.  By definition,
-  // there are no intergenerational pointers in code or data spaces.
-  IterateDirtyRegions(old_pointer_space_,
-                      &Heap::IteratePointersInDirtyRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  IterateDirtyRegions(map_space_,
-                      &IteratePointersInDirtyMapsRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  lo_space_->IterateDirtyRegions(&ScavengePointer);
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope(this,
+                                  store_buffer(),
+                                  &ScavengeStoreBufferCallback);
+    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+  }
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* cell = cell_iterator.next();
-       cell != NULL; cell = cell_iterator.next()) {
+  for (HeapObject* cell = cell_iterator.Next();
+       cell != NULL; cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -1040,20 +1164,23 @@
       &scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
-
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
+  promotion_queue_.Destroy();
+
   LiveObjectList::UpdateReferencesForScavengeGC();
   isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ASSERT(new_space_front == new_space_.top());
 
-  is_safe_to_read_maps_ = true;
-
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
+  new_space_.LowerInlineAllocationLimit(
+      new_space_.inline_allocation_limit_step());
+
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
@@ -1081,7 +1208,9 @@
 
 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     ExternalStringTableUpdaterCallback updater_func) {
-  external_string_table_.Verify();
+  if (FLAG_verify_heap) {
+    external_string_table_.Verify();
+  }
 
   if (external_string_table_.new_space_strings_.is_empty()) return;
 
@@ -1112,35 +1241,56 @@
 }
 
 
+void Heap::UpdateReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+
+  // Update old space string references.
+  if (external_string_table_.old_space_strings_.length() > 0) {
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+  }
+
+  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
                                              WeakObjectRetainer* retainer) {
-  Object* head = heap->undefined_value();
+  Object* undefined = heap->undefined_value();
+  Object* head = undefined;
   JSFunction* tail = NULL;
   Object* candidate = function;
-  while (candidate != heap->undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == heap->undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_function;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
-        tail->set_next_function_link(candidate_function);
+        tail->set_next_function_link(retain);
       }
       // Retained function is new tail.
+      candidate_function = reinterpret_cast<JSFunction*>(retain);
       tail = candidate_function;
+
+      ASSERT(retain->IsUndefined() || retain->IsJSFunction());
+
+      if (retain == undefined) break;
     }
+
     // Move to next element in the list.
     candidate = candidate_function->next_function_link();
   }
 
   // Terminate the list if there is one or more elements.
   if (tail != NULL) {
-    tail->set_next_function_link(heap->undefined_value());
+    tail->set_next_function_link(undefined);
   }
 
   return head;
@@ -1148,28 +1298,32 @@
 
 
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  Object* head = undefined_value();
+  Object* undefined = undefined_value();
+  Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
-  while (candidate != undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_context;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
         tail->set_unchecked(this,
                             Context::NEXT_CONTEXT_LINK,
-                            candidate_context,
+                            retain,
                             UPDATE_WRITE_BARRIER);
       }
       // Retained context is new tail.
+      candidate_context = reinterpret_cast<Context*>(retain);
       tail = candidate_context;
 
+      if (retain == undefined) break;
+
       // Process the weak list of optimized functions for the context.
       Object* function_list_head =
           ProcessFunctionWeakReferences(
@@ -1181,6 +1335,7 @@
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
     }
+
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
   }
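Reviewer note: both weak-list walks above share one pattern, and both hunks fix the same bug — the node that gets linked must be the object returned by the retainer (which may be a relocated copy), not the original candidate address. A minimal sketch of the pattern with toy types:

    #include <cstddef>

    struct Node {
      Node* next;
    };

    // retain returns NULL to drop a node, or the node's (possibly relocated)
    // current address to keep it -- mirroring WeakObjectRetainer::RetainAs.
    Node* FilterWeakList(Node* head, Node* (*retain)(Node*)) {
      Node* new_head = NULL;
      Node* tail = NULL;
      Node* candidate = head;
      while (candidate != NULL) {
        Node* kept = retain(candidate);
        if (kept != NULL) {
          // Link the *retained* node, not the stale candidate address.
          if (tail == NULL) new_head = kept; else tail->next = kept;
          tail = kept;
          candidate = kept->next;
        } else {
          candidate = candidate->next;
        }
      }
      if (tail != NULL) tail->next = NULL;  // re-terminate the list
      return new_head;
    }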
@@ -1212,35 +1367,45 @@
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
-    ASSERT(new_space_front <= new_space_.top());
-
+    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front < new_space_.top()) {
-      HeapObject* object = HeapObject::FromAddress(new_space_front);
-      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
+    while (new_space_front != new_space_.top()) {
+      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+        HeapObject* object = HeapObject::FromAddress(new_space_front);
+        new_space_front +=
+          NewSpaceScavenger::IterateBody(object->map(), object);
+      } else {
+        new_space_front =
+            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+      }
     }
 
     // Promote and process all the to-be-promoted objects.
-    while (!promotion_queue_.is_empty()) {
-      HeapObject* target;
-      int size;
-      promotion_queue_.remove(&target, &size);
+    {
+      StoreBufferRebuildScope scope(this,
+                                    store_buffer(),
+                                    &ScavengeStoreBufferCallback);
+      while (!promotion_queue()->is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue()->remove(&target, &size);
 
-      // Promoted object might be already partially visited
-      // during dirty regions iteration. Thus we search specificly
-      // for pointers to from semispace instead of looking for pointers
-      // to new space.
-      ASSERT(!target->IsMap());
-      IterateAndMarkPointersToFromSpace(target->address(),
-                                        target->address() + size,
-                                        &ScavengePointer);
+        // Promoted object might be already partially visited
+        // during old space pointer iteration. Thus we search specifically
+        // for pointers to from semispace instead of looking for pointers
+        // to new space.
+        ASSERT(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(target->address(),
+                                          target->address() + size,
+                                          &ScavengeObject);
+      }
     }
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front < new_space_.top());
+  } while (new_space_front != new_space_.top());
 
   return new_space_front;
 }
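Reviewer note: DoScavenge above is the page-aware form of Cheney's algorithm — a scan front chases the allocation top through to-space, and every object it passes has its children evacuated in turn until the two meet. The classic loop, reduced to a toy object graph:

    #include <cstddef>
    #include <vector>

    struct Obj {
      Obj* forward;              // non-null once evacuated
      std::vector<Obj*> fields;
    };

    static Obj* Evacuate(Obj* o, std::vector<Obj*>* to_space) {
      if (o == NULL) return NULL;
      if (o->forward != NULL) return o->forward;  // already copied
      Obj* copy = new Obj(*o);
      copy->forward = NULL;
      o->forward = copy;            // leave a forwarding pointer behind
      to_space->push_back(copy);    // enqueue the copy for scanning
      return copy;
    }

    static void Scavenge(std::vector<Obj*>* roots) {
      std::vector<Obj*> to_space;
      for (size_t i = 0; i < roots->size(); ++i)
        (*roots)[i] = Evacuate((*roots)[i], &to_space);
      // The scan front chases the growing end of to_space, exactly like
      // new_space_front chasing new_space_.top() above.
      for (size_t front = 0; front < to_space.size(); ++front)
        for (size_t j = 0; j < to_space[front]->fields.size(); ++j)
          to_space[front]->fields[j] =
              Evacuate(to_space[front]->fields[j], &to_space);
    }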
@@ -1252,26 +1417,11 @@
 };
 
 
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object);
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
 
-static Atomic32 scavenging_visitors_table_mode_;
-static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-
-INLINE(static void DoScavengeObject(Map* map,
-                                    HeapObject** slot,
-                                    HeapObject* obj));
-
-
-void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-}
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1306,9 +1456,13 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
 
-    table_.Register(kVisitJSFunction,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<JSFunction::kSize>);
+    if (marks_handling == IGNORE_MARKS) {
+      table_.Register(kVisitJSFunction,
+                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                          template VisitSpecialized<JSFunction::kSize>);
+    } else {
+      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+    }
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1349,10 +1503,10 @@
   // Helper function used by CopyObject to copy a source object to an
   // allocated target object and update the forwarding pointer in the source
   // object.  Returns the target object.
-  INLINE(static HeapObject* MigrateObject(Heap* heap,
-                                          HeapObject* source,
-                                          HeapObject* target,
-                                          int size)) {
+  INLINE(static void MigrateObject(Heap* heap,
+                                   HeapObject* source,
+                                   HeapObject* target,
+                                   int size)) {
     // Copy the content of source to target.
     heap->CopyBlock(target->address(), source->address(), size);
 
@@ -1373,26 +1527,30 @@
       }
     }
 
-    return target;
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytes(target->address(), size);
+      }
+    }
   }
 
-
   template<ObjectContents object_contents, SizeRestriction size_restriction>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object,
                                     int object_size) {
-    ASSERT((size_restriction != SMALL) ||
-           (object_size <= Page::kMaxHeapObjectSize));
-    ASSERT(object->Size() == object_size);
+    SLOW_ASSERT((size_restriction != SMALL) ||
+                (object_size <= Page::kMaxNonCodeHeapObjectSize));
+    SLOW_ASSERT(object->Size() == object_size);
 
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
+          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
+        maybe_result = heap->lo_space()->AllocateRaw(object_size,
+                                                     NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
           maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1404,7 +1562,12 @@
       Object* result = NULL;  // Initialization to please compiler.
       if (maybe_result->ToObject(&result)) {
         HeapObject* target = HeapObject::cast(result);
-        *slot = MigrateObject(heap, object , target, object_size);
+
+        // Order is important: the slot might be inside the target if the
+        // target was allocated over a dead object and the slot comes from
+        // the store buffer.
+        *slot = target;
+        MigrateObject(heap, object, target, object_size);
 
         if (object_contents == POINTER_OBJECT) {
           heap->promotion_queue()->insert(target, object_size);
@@ -1414,13 +1577,42 @@
         return;
       }
     }
-    Object* result =
-        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
-    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+    Object* result = allocation->ToObjectUnchecked();
+    HeapObject* target = HeapObject::cast(result);
+
+    // Order is important: the slot might be inside the target if the
+    // target was allocated over a dead object and the slot comes from
+    // the store buffer.
+    *slot = target;
+    MigrateObject(heap, object, target, object_size);
     return;
   }
 
 
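The "order is important" comments deserve a concrete illustration. Because the target may have been allocated on top of a dead object, the slot being redirected can itself lie inside the target's new body; storing the forwarding value before the copy lets the copied payload win at that address, whereas storing it afterwards would corrupt the fresh copy. A toy model of the aliasing hazard in plain C++, not V8 types:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      alignas(sizeof(uintptr_t)) unsigned char heap[32] = {0};
      unsigned char* target = heap;    // the object's new home
      unsigned char* slot = heap + 8;  // store-buffer slot INSIDE the target
      unsigned char source[16];
      std::memset(source, 0xAB, sizeof(source));

      // Correct order: update the slot first, then copy. The copy overwrites
      // the stale slot store, leaving the target's payload intact.
      *reinterpret_cast<uintptr_t*>(slot) = reinterpret_cast<uintptr_t>(target);
      std::memcpy(target, source, sizeof(source));

      std::printf("heap[8] = 0x%02x\n", heap[8]);  // 0xab: payload, not a pointer
      return 0;
    }
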
+  static inline void EvacuateJSFunction(Map* map,
+                                        HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::
+        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+
+    HeapObject* target = *slot;
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by the marker.
+      // We must explicitly record the code entry slot for compaction because
+      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+      // miss it, as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->
+          RecordCodeEntrySlot(code_entry_slot, code);
+    }
+  }
+
+
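A note on EvacuateJSFunction above: the promotion queue walker only follows properly tagged HeapObject pointers, and a code-entry field holds a raw instruction address, so a black (already marked) function must report that slot to the compactor itself. A toy sketch of why an untagged slot is invisible to a tagged-pointer scan (toy tagging scheme, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy tagging: heap pointers have the low bit set; raw addresses do not.
    static bool IsTaggedHeapPointer(uintptr_t word) { return (word & 1) != 0; }

    // A scanner in the spirit of IterateAndMarkPointersToFromSpace: it visits
    // only tagged words, so raw entries need a separate reporting channel.
    static int ScanTaggedSlots(const std::vector<uintptr_t>& slots) {
      int visited = 0;
      for (uintptr_t word : slots) {
        if (IsTaggedHeapPointer(word)) visited++;  // raw words are skipped
      }
      return visited;
    }

    int main() {
      std::vector<uintptr_t> slots = {0x1001, 0x2000 /* raw code entry */};
      std::printf("visited %d of %zu slots\n", ScanTaggedSlots(slots),
                  slots.size());
      return 0;
    }
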
   static inline void EvacuateFixedArray(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
@@ -1479,14 +1671,17 @@
                                                HeapObject* object) {
     ASSERT(IsShortcutCandidate(map->instance_type()));
 
-    if (ConsString::cast(object)->unchecked_second() ==
-        map->heap()->empty_string()) {
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() ==
+        heap->empty_string()) {
       HeapObject* first =
           HeapObject::cast(ConsString::cast(object)->unchecked_first());
 
       *slot = first;
 
-      if (!map->heap()->InNewSpace(first)) {
+      if (!heap->InNewSpace(first)) {
         object->set_map_word(MapWord::FromForwardingAddress(first));
         return;
       }
@@ -1500,7 +1695,7 @@
         return;
       }
 
-      DoScavengeObject(first->map(), slot, first);
+      heap->DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
@@ -1531,55 +1726,70 @@
 };
 
 
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<logging_and_profiling_mode>::table_;
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
 
 
 static void InitializeScavengingVisitorsTables() {
-  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  scavenging_visitors_table_.CopyFrom(
-      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
-  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
-void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
-  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
-    // Table was already updated by some isolate.
-    return;
-  }
-
-  if (isolate()->logger()->is_logging() |
+void Heap::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      isolate()->logger()->is_logging() ||
       CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_profiling())) {
-    // If one of the isolates is doing scavenge at this moment of time
-    // it might see this table in an inconsitent state when
-    // some of the callbacks point to
-    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
-    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
-    // However this does not lead to any bugs as such isolate does not have
-    // profiling enabled and any isolate with enabled profiling is guaranteed
-    // to see the table in the consistent state.
-    scavenging_visitors_table_.CopyFrom(
-        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+       isolate()->heap_profiler()->is_profiling());
 
-    // We use Release_Store to prevent reordering of this write before writes
-    // to the table.
-    Release_Store(&scavenging_visitors_table_mode_,
-                  LOGGING_AND_PROFILING_ENABLED);
+  if (!incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+
+    if (incremental_marking()->IsCompacting()) {
+      // When compacting, forbid short-circuiting of cons strings.
+      // Scavenging code relies on the fact that a new-space object
+      // can't be evacuated into an evacuation candidate, but
+      // short-circuiting violates this assumption.
+      scavenging_visitors_table_.Register(
+          StaticVisitorBase::kVisitShortcutCandidate,
+          scavenging_visitors_table_.GetVisitorById(
+              StaticVisitorBase::kVisitConsString));
+    }
   }
 }
 
 
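For readers unfamiliar with the shortcut: a cons string whose second half is the empty string can be replaced by its first half during scavenging. While compacting, that substitution could leave a scavenged slot pointing into an evacuation candidate, so SelectScavengingVisitorsTable above re-registers the plain cons-string visitor. A toy model of the shortcut itself, with illustrative types only:

    #include <cassert>
    #include <string>

    struct ToyCons { std::string first, second; };

    // IGNORE_MARKS behaviour: ("abc", "") collapses to its first half.
    static const std::string* Shortcut(const ToyCons& cons) {
      return cons.second.empty() ? &cons.first : nullptr;
    }

    int main() {
      ToyCons collapsible{"abc", ""};
      ToyCons full{"ab", "c"};
      assert(Shortcut(collapsible) == &collapsible.first);
      assert(Shortcut(full) == nullptr);
      return 0;
    }
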
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  ASSERT(HEAP->InFromSpace(object));
+  SLOW_ASSERT(HEAP->InFromSpace(object));
   MapWord first_word = object->map_word();
-  ASSERT(!first_word.IsForwardingAddress());
+  SLOW_ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  DoScavengeObject(map, p, object);
+  map->GetHeap()->DoScavengeObject(map, p, object);
 }
 
 
@@ -1605,29 +1815,31 @@
 }
 
 
-MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type,
+                               int instance_size,
+                               ElementsKind elements_kind) {
   Object* result;
   { MaybeObject* maybe_result = AllocateRawMap();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
   Map* map = reinterpret_cast<Map*>(result);
-  map->set_map(meta_map());
+  map->set_map_unsafe(meta_map());
   map->set_instance_type(instance_type);
   map->set_visitor_id(
       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
-  map->set_prototype(null_value());
-  map->set_constructor(null_value());
+  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
+  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
-  map->set_code_cache(empty_fixed_array());
-  map->set_prototype_transitions(empty_fixed_array());
+  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
-  map->set_elements_kind(FAST_ELEMENTS);
+  map->set_elements_kind(elements_kind);
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -1645,8 +1857,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   CodeCache* code_cache = CodeCache::cast(result);
-  code_cache->set_default_cache(empty_fixed_array());
-  code_cache->set_normal_type_cache(undefined_value());
+  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
   return code_cache;
 }
 
@@ -1707,12 +1919,19 @@
   }
   set_empty_fixed_array(FixedArray::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_null_value(obj);
+  set_null_value(Oddball::cast(obj));
   Oddball::cast(obj)->set_kind(Oddball::kNull);
 
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_undefined_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+  ASSERT(!InNewSpace(undefined_value()));
+
   // Allocate the empty descriptor array.
   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -1753,7 +1972,7 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_serialized_scope_info_map(Map::cast(obj));
+  set_scope_info_map(Map::cast(obj));
 
   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -1798,6 +2017,12 @@
   }
   set_byte_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj =
+        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_free_space_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -1950,7 +2175,7 @@
 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
 
   Object* result;
@@ -1959,7 +2184,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map(heap_number_map());
+  HeapObject::cast(result)->set_map_unsafe(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -1971,13 +2196,13 @@
 
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   Object* result;
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map(heap_number_map());
+  HeapObject::cast(result)->set_map_unsafe(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -1988,7 +2213,7 @@
   { MaybeObject* maybe_result = AllocateRawCell();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map(global_property_cell_map());
+  HeapObject::cast(result)->set_map_unsafe(global_property_cell_map());
   JSGlobalPropertyCell::cast(result)->set_value(value);
   return result;
 }
@@ -1998,7 +2223,7 @@
                                  Object* to_number,
                                  byte kind) {
   Object* result;
-  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2011,7 +2236,13 @@
   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_neander_map(Map::cast(obj));
+  // Don't use Smi-only elements optimizations for objects with the neander
+  // map. There are too many cases where element values are set directly,
+  // bypassing the bottleneck that would trap the Smi-only -> fast elements
+  // transition, and there appears to be no benefit in optimizing this case.
+  Map* new_neander_map = Map::cast(obj);
+  new_neander_map->set_elements_kind(FAST_ELEMENTS);
+  set_neander_map(new_neander_map);
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2056,6 +2287,12 @@
   // To workaround the problem, make separate functions without inlining.
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
+
+  // Create the stubs that must always exist, so that we never have to
+  // create one unexpectedly while we are creating another stub.
+  // Stub creation mixes raw pointers and handles in an unsafe manner, so
+  // we cannot create stubs while we are creating stubs.
+  CodeStub::GenerateStubsAheadOfTime();
 }
 
 
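The "cannot create stubs while we are creating stubs" constraint is a re-entrancy problem: generating one stub must never fault in the generation of another. A minimal sketch of the guard this ahead-of-time generation makes unnecessary (hypothetical names, not V8's CodeStub API):

    #include <cassert>

    class ToyStubFactory {
     public:
      void Generate() {
        // Stub generation mixes raw pointers and handles, so nested
        // generation is unsafe; generating the fixed set ahead of time
        // removes the need to hit this guard in normal operation.
        assert(!generating_);
        generating_ = true;
        // ... emit machine code for the stub ...
        generating_ = false;
      }

     private:
      bool generating_ = false;
    };

    int main() {
      ToyStubFactory factory;
      factory.Generate();  // fine; a nested Generate() would assert
      return 0;
    }
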
@@ -2066,20 +2303,18 @@
   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_minus_zero_value(obj);
+  set_minus_zero_value(HeapNumber::cast(obj));
   ASSERT(signbit(minus_zero_value()->Number()) != 0);
 
   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_nan_value(obj);
+  set_nan_value(HeapNumber::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_undefined_value(obj);
-  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
-  ASSERT(!InNewSpace(undefined_value()));
+  set_infinity_value(HeapNumber::cast(obj));
 
   // Allocate initial symbol table.
   { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
@@ -2088,19 +2323,17 @@
   // Don't use set_symbol_table() due to asserts.
   roots_[kSymbolTableRootIndex] = obj;
 
-  // Assign the print strings for oddballs after creating symboltable.
-  Object* symbol;
-  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
-    if (!maybe_symbol->ToObject(&symbol)) return false;
-  }
-  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
-  Oddball::cast(undefined_value())->set_to_number(nan_value());
-
-  // Allocate the null_value
+  // Finish initializing the oddballs after creating the symbol table.
   { MaybeObject* maybe_obj =
-        Oddball::cast(null_value())->Initialize("null",
-                                                Smi::FromInt(0),
-                                                Oddball::kNull);
+        undefined_value()->Initialize("undefined",
+                                      nan_value(),
+                                      Oddball::kUndefined);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+
+  // Initialize the null_value.
+  { MaybeObject* maybe_obj =
+        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
 
@@ -2109,43 +2342,51 @@
                                            Oddball::kTrue);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_true_value(obj);
+  set_true_value(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("false",
                                            Smi::FromInt(0),
                                            Oddball::kFalse);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_false_value(obj);
+  set_false_value(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("hole",
                                            Smi::FromInt(-1),
                                            Oddball::kTheHole);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_the_hole_value(obj);
+  set_the_hole_value(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
-                                           Smi::FromInt(-4),
+                                           Smi::FromInt(-2),
                                            Oddball::kArgumentMarker);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_arguments_marker(obj);
+  set_arguments_marker(Oddball::cast(obj));
 
   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
-                                           Smi::FromInt(-2),
+                                           Smi::FromInt(-3),
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_no_interceptor_result_sentinel(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
-                                           Smi::FromInt(-3),
+                                           Smi::FromInt(-4),
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_termination_exception(obj);
 
+  { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
+                                           Smi::FromInt(-5),
+                                           Oddball::kOther);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_frame_alignment_marker(Oddball::cast(obj));
+  STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
+
   // Allocate the empty string.
   { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2181,17 +2422,17 @@
 
   // Allocate the code_stubs dictionary. The initial size is set to avoid
   // expanding the dictionary during bootstrapping.
-  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
+  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_code_stubs(UnseededNumberDictionary::cast(obj));
+  set_code_stubs(NumberDictionary::cast(obj));
 
   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
   // is set to avoid expanding the dictionary during bootstrapping.
-  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
+  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
+  set_non_monomorphic_cache(NumberDictionary::cast(obj));
 
   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2422,6 +2663,15 @@
 }
 
 
+MaybeObject* Heap::Uint32ToString(uint32_t value,
+                                  bool check_number_string_cache) {
+  Object* number;
+  MaybeObject* maybe = NumberFromUint32(value);
+  if (!maybe->To<Object>(&number)) return maybe;
+  return NumberToString(number, check_number_string_cache);
+}
+
+
 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
 }
@@ -2478,14 +2728,12 @@
 
 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Object* result;
-  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-
-  Foreign::cast(result)->set_address(address);
+  Foreign* result;
+  MaybeObject* maybe_result = Allocate(foreign_map(), space);
+  if (!maybe_result->To(&result)) return maybe_result;
+  result->set_foreign_address(address);
   return result;
 }
 
@@ -2499,17 +2747,17 @@
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
-  share->set_scope_info(SerializedScopeInfo::Empty());
+  share->set_scope_info(ScopeInfo::Empty());
   Code* construct_stub =
       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
   share->set_construct_stub(construct_stub);
   share->set_instance_class_name(Object_symbol());
-  share->set_function_data(undefined_value());
-  share->set_script(undefined_value());
-  share->set_debug_info(undefined_value());
-  share->set_inferred_name(empty_string());
-  share->set_initial_map(undefined_value());
-  share->set_this_property_assignments(undefined_value());
+  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
+  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
   share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
 
   // Set integer fields (smi or int, depending on the architecture).
@@ -2541,8 +2789,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   JSMessageObject* message = JSMessageObject::cast(result);
-  message->set_properties(Heap::empty_fixed_array());
-  message->set_elements(Heap::empty_fixed_array());
+  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
+  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
   message->set_type(type);
   message->set_arguments(arguments);
   message->set_start_position(start_position);
@@ -2658,14 +2906,14 @@
       // Copy first part.
       const char* src;
       if (first->IsExternalString()) {
-        src = ExternalAsciiString::cast(first)->resource()->data();
+        src = ExternalAsciiString::cast(first)->GetChars();
       } else {
         src = SeqAsciiString::cast(first)->GetChars();
       }
       for (int i = 0; i < first_length; i++) *dest++ = src[i];
       // Copy second part.
       if (second->IsExternalString()) {
-        src = ExternalAsciiString::cast(second)->resource()->data();
+        src = ExternalAsciiString::cast(second)->GetChars();
       } else {
         src = SeqAsciiString::cast(second)->GetChars();
       }
@@ -2737,25 +2985,23 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
-  // TODO(1626): For now slicing external strings is not supported.  However,
-  // a flat cons string can have an external string as first part in some cases.
-  // Therefore we have to single out this case as well.
   if (!FLAG_string_slices ||
-      (buffer->IsConsString() &&
-        (!buffer->IsFlat() ||
-         !ConsString::cast(buffer)->first()->IsSeqString())) ||
-      buffer->IsExternalString() ||
+      !buffer->IsFlat() ||
       length < SlicedString::kMinLength ||
       pretenure == TENURED) {
     Object* result;
-    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
-                     ? AllocateRawAsciiString(length, pretenure)
-                     : AllocateRawTwoByteString(length, pretenure);
+    // WriteToFlat takes care of the case when an indirect string has a
+    // different encoding from its underlying string.  These encodings may
+    // differ because of externalization.
+    bool is_ascii = buffer->IsAsciiRepresentation();
+    { MaybeObject* maybe_result = is_ascii
+                                  ? AllocateRawAsciiString(length, pretenure)
+                                  : AllocateRawTwoByteString(length, pretenure);
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     String* string_result = String::cast(result);
     // Copy the characters into the new object.
-    if (buffer->IsAsciiRepresentation()) {
+    if (is_ascii) {
       ASSERT(string_result->IsAsciiRepresentation());
       char* dest = SeqAsciiString::cast(string_result)->GetChars();
       String::WriteToFlat(buffer, dest, start, end);
@@ -2768,12 +3014,19 @@
   }
 
   ASSERT(buffer->IsFlat());
-  ASSERT(!buffer->IsExternalString());
 #if DEBUG
-  buffer->StringVerify();
+  if (FLAG_verify_heap) {
+    buffer->StringVerify();
+  }
 #endif
 
   Object* result;
+  // When slicing an indirect string we use its encoding for a newly created
+  // slice and don't check the encoding of the underlying string.  This is safe
+  // even if the encodings are different because of externalization.  If an
+  // indirect ASCII string is pointing to a two-byte string, the two-byte char
+  // codes of the underlying string must still fit into ASCII (because
+  // externalization must not change char codes).
   { Map* map = buffer->IsAsciiRepresentation()
                  ? sliced_ascii_string_map()
                  : sliced_string_map();
@@ -2799,13 +3052,14 @@
     sliced_string->set_parent(buffer);
     sliced_string->set_offset(start);
   }
-  ASSERT(sliced_string->parent()->IsSeqString());
+  ASSERT(sliced_string->parent()->IsSeqString() ||
+         sliced_string->parent()->IsExternalString());
   return result;
 }
 
 
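Since the relaxed slicing rules above lean on a subtle invariant, a small model helps: a slice carries its own encoding flag, and an ASCII slice over a two-byte parent is legal exactly because externalization never changes char codes, only the backing representation. Toy types, not V8's string hierarchy:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyString { std::vector<uint16_t> chars; };
    struct ToySlice  { const ToyString* parent; int offset, length; bool ascii; };

    // A slice keeps its own encoding; if it claims ASCII, every char code in
    // the sliced range of the (possibly two-byte) parent must fit in ASCII.
    static ToySlice MakeSlice(const ToyString& parent, int start, int length,
                              bool ascii) {
      for (int i = start; i < start + length; i++) {
        if (ascii) assert(parent.chars[i] < 128);
      }
      return ToySlice{&parent, start, length, ascii};
    }

    int main() {
      ToyString two_byte{{'h', 'i', 0x2603}};         // last char is non-ASCII
      ToySlice ok = MakeSlice(two_byte, 0, 2, true);  // "hi" fits in ASCII
      (void)ok;
      return 0;
    }
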
 MaybeObject* Heap::AllocateExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2828,7 +3082,7 @@
 
 
 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2890,13 +3144,13 @@
   }
   int size = ByteArray::SizeFor(length);
   Object* result;
-  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
                    ? old_data_space_->AllocateRaw(size)
-                   : lo_space_->AllocateRaw(size);
+                   : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -2908,13 +3162,13 @@
   }
   int size = ByteArray::SizeFor(length);
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -2924,12 +3178,12 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map(one_pointer_filler_map());
+    filler->set_map_unsafe(one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map(two_pointer_filler_map());
+    filler->set_map_unsafe(two_pointer_filler_map());
   } else {
-    filler->set_map(byte_array_map());
-    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
+    filler->set_map_unsafe(free_space_map());
+    FreeSpace::cast(filler)->set_size(size);
   }
 }
 
@@ -2946,7 +3200,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ExternalArray*>(result)->set_map(
+  reinterpret_cast<ExternalArray*>(result)->set_map_unsafe(
       MapForExternalArrayType(array_type));
   reinterpret_cast<ExternalArray*>(result)->set_length(length);
   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -2962,10 +3216,9 @@
                               bool immovable) {
   // Allocate ByteArray before the Code object, so that we do not risk
   // leaving uninitialized Code object (and breaking the heap).
-  Object* reloc_info;
-  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
-    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
-  }
+  ByteArray* reloc_info;
+  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
+  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
 
   // Compute size.
   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
@@ -2974,8 +3227,8 @@
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+  if (obj_size > code_space()->AreaSize() || immovable) {
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -2984,17 +3237,18 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
 
   // Initialize the object
-  HeapObject::cast(result)->set_map(code_map());
+  HeapObject::cast(result)->set_map_unsafe(code_map());
   Code* code = Code::cast(result);
   ASSERT(!isolate_->code_range()->exists() ||
       isolate_->code_range()->contains(code->address()));
   code->set_instruction_size(desc.instr_size);
-  code->set_relocation_info(ByteArray::cast(reloc_info));
+  code->set_relocation_info(reloc_info);
   code->set_flags(flags);
   if (code->is_call_stub() || code->is_keyed_call_stub()) {
     code->set_check_type(RECEIVER_MAP_CHECK);
   }
-  code->set_deoptimization_data(empty_fixed_array());
+  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_next_code_flushing_candidate(undefined_value());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
@@ -3009,7 +3263,9 @@
   code->CopyFrom(desc);
 
 #ifdef DEBUG
-  code->Verify();
+  if (FLAG_verify_heap) {
+    code->Verify();
+  }
 #endif
   return code;
 }
@@ -3019,8 +3275,8 @@
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
   MaybeObject* maybe_result;
-  if (obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+  if (obj_size > code_space()->AreaSize()) {
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3062,8 +3318,8 @@
       static_cast<size_t>(code->instruction_end() - old_addr);
 
   MaybeObject* maybe_result;
-  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
+  if (new_obj_size > code_space()->AreaSize()) {
+    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
   }
@@ -3089,7 +3345,9 @@
   new_code->Relocate(new_addr - old_addr);
 
 #ifdef DEBUG
-  code->Verify();
+  if (FLAG_verify_heap) {
+    code->Verify();
+  }
 #endif
   return new_code;
 }
@@ -3107,14 +3365,15 @@
         AllocateRaw(map->instance_size(), space, retry_space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map(map);
+  // No need for write barrier since object is white and map is in old space.
+  HeapObject::cast(result)->set_map_unsafe(map);
   return result;
 }
 
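SKIP_WRITE_BARRIER appears throughout this patch, and the comment above states the justification: a freshly allocated object is still white, so its initializing stores cannot hide an edge from the incremental marker. A toy model of a store with an optional barrier (illustrative names, not V8's RecordWrite machinery):

    #include <cstdio>
    #include <unordered_set>

    enum ToyWriteBarrierMode { TOY_UPDATE_WRITE_BARRIER, TOY_SKIP_WRITE_BARRIER };

    struct ToyHeap {
      std::unordered_set<void**> remembered;  // slots the collector must revisit
    };

    // Initializing stores into a brand-new (white) object may skip the
    // barrier: the object will be scanned in full when it is first marked.
    // Stores into objects the marker may already have visited must take the
    // UPDATE path so the slot is remembered.
    static void Store(ToyHeap* heap, void** slot, void* value,
                      ToyWriteBarrierMode mode) {
      *slot = value;
      if (mode == TOY_UPDATE_WRITE_BARRIER) heap->remembered.insert(slot);
    }

    int main() {
      ToyHeap heap;
      void* fresh_object[2] = {nullptr, nullptr};
      Store(&heap, &fresh_object[0], &heap, TOY_SKIP_WRITE_BARRIER);
      std::printf("remembered slots: %zu\n", heap.remembered.size());  // 0
      return 0;
    }
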
 
-MaybeObject* Heap::InitializeFunction(JSFunction* function,
-                                      SharedFunctionInfo* shared,
-                                      Object* prototype) {
+void Heap::InitializeFunction(JSFunction* function,
+                              SharedFunctionInfo* shared,
+                              Object* prototype) {
   ASSERT(!prototype->IsMap());
   function->initialize_properties();
   function->initialize_elements();
@@ -3122,9 +3381,8 @@
   function->set_code(shared->code());
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
-  function->set_literals(empty_fixed_array());
+  function->set_literals_or_bindings(empty_fixed_array());
   function->set_next_function_link(undefined_value());
-  return function;
 }
 
 
@@ -3134,8 +3392,18 @@
   // different context.
   JSFunction* object_function =
       function->context()->global_context()->object_function();
+
+  // Each function prototype gets a copy of the object function map.
+  // This avoids unwanted sharing of maps between prototypes of different
+  // constructors.
+  Map* new_map;
+  ASSERT(object_function->has_initial_map());
+  { MaybeObject* maybe_map =
+        object_function->initial_map()->CopyDropTransitions();
+    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+  }
   Object* prototype;
-  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
+  { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   }
   // When creating the prototype for the function we must set its
@@ -3160,7 +3428,8 @@
   { MaybeObject* maybe_result = Allocate(function_map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  return InitializeFunction(JSFunction::cast(result), shared, prototype);
+  InitializeFunction(JSFunction::cast(result), shared, prototype);
+  return result;
 }
 
 
@@ -3171,7 +3440,7 @@
   JSObject* boilerplate;
   int arguments_object_size;
   bool strict_mode_callee = callee->IsJSFunction() &&
-                            JSFunction::cast(callee)->shared()->strict_mode();
+      !JSFunction::cast(callee)->shared()->is_classic_mode();
   if (strict_mode_callee) {
     boilerplate =
         isolate()->context()->global_context()->
@@ -3277,22 +3546,22 @@
       // Inline constructor can only handle inobject properties.
       fun->shared()->ForbidInlineConstructor();
     } else {
-      Object* descriptors_obj;
+      DescriptorArray* descriptors;
       { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
-        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
+        if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
           return maybe_descriptors_obj;
         }
       }
-      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+      DescriptorArray::WhitenessWitness witness(descriptors);
       for (int i = 0; i < count; i++) {
         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
         ASSERT(name->IsSymbol());
         FieldDescriptor field(name, i, NONE);
         field.SetEnumerationIndex(i);
-        descriptors->Set(i, &field);
+        descriptors->Set(i, &field, witness);
       }
       descriptors->SetNextEnumerationIndex(count);
-      descriptors->SortUnchecked();
+      descriptors->SortUnchecked(witness);
 
       // The descriptors may contain duplicates because the compiler does not
       // guarantee the uniqueness of property names (it would have required
@@ -3330,6 +3599,9 @@
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
   // with undefined_value.
+  // Pre-allocated fields need to be initialized with undefined_value as well
+  // so that object accesses before the constructor completes (e.g. in the
+  // debugger) will not cause a crash.
   if (map->constructor()->IsJSFunction() &&
       JSFunction::cast(map->constructor())->shared()->
           IsInobjectSlackTrackingInProgress()) {
@@ -3339,7 +3611,7 @@
   } else {
     filler = Heap::undefined_value();
   }
-  obj->InitializeBody(map->instance_size(), filler);
+  obj->InitializeBody(map, Heap::undefined_value(), filler);
 }
 
 
@@ -3367,7 +3639,7 @@
   // Allocate the JSObject.
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
   Object* obj;
   { MaybeObject* maybe_obj = Allocate(map, space);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -3377,7 +3649,8 @@
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastElements());
+  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+         JSObject::cast(obj)->HasFastElements());
   return obj;
 }
 
@@ -3420,6 +3693,7 @@
   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   return result;
 }
 
@@ -3443,6 +3717,7 @@
   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   result->set_call_trap(call_trap);
   result->set_construct_trap(construct_trap);
   return result;
@@ -3525,13 +3800,15 @@
 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions.  If functions need to be copied we
   // have to be careful to clear the literals array.
-  ASSERT(!source->IsJSFunction());
+  SLOW_ASSERT(!source->IsJSFunction());
 
   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;
 
+  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
@@ -3548,10 +3825,11 @@
                  JSObject::kHeaderSize,
                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
+    wb_mode = SKIP_WRITE_BARRIER;
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
-    ASSERT(InNewSpace(clone));
+    SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
@@ -3559,6 +3837,8 @@
               object_size);
   }
 
+  SLOW_ASSERT(
+      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -3574,7 +3854,7 @@
       }
       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
     }
-    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
+    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
   }
   // Update properties if necessary.
   if (properties->length() > 0) {
@@ -3582,7 +3862,7 @@
     { MaybeObject* maybe_prop = CopyFixedArray(properties);
       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
     }
-    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
+    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
   }
   // Return the new clone.
   return clone;
@@ -3591,13 +3871,13 @@
 
 MaybeObject* Heap::ReinitializeJSReceiver(
     JSReceiver* object, InstanceType type, int size) {
-  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
+  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
 
   // Allocate fresh map.
   // TODO(rossberg): Once we optimize proxies, cache these maps.
   Map* map;
-  MaybeObject* maybe_map_obj = AllocateMap(type, size);
-  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  MaybeObject* maybe = AllocateMap(type, size);
+  if (!maybe->To<Map>(&map)) return maybe;
 
   // Check that the receiver has at least the size of the fresh object.
   int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3608,30 +3888,35 @@
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
   Object* properties;
-  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
-    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+  maybe = AllocateFixedArray(prop_size, TENURED);
+  if (!maybe->ToObject(&properties)) return maybe;
+
+  // Functions require some allocation, which might fail here.
+  SharedFunctionInfo* shared = NULL;
+  if (type == JS_FUNCTION_TYPE) {
+    String* name;
+    maybe = LookupAsciiSymbol("<freezing call trap>");
+    if (!maybe->To<String>(&name)) return maybe;
+    maybe = AllocateSharedFunctionInfo(name);
+    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
   }
 
+  // Because of possible retries of this function after failure,
+  // we must NOT fail after this point, where we have changed the type!
+
   // Reset the map for the object.
   object->set_map(map);
+  JSObject* jsobj = JSObject::cast(object);
 
   // Reinitialize the object from the constructor map.
-  InitializeJSObjectFromMap(JSObject::cast(object),
-                            FixedArray::cast(properties), map);
+  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
 
   // Functions require some minimal initialization.
   if (type == JS_FUNCTION_TYPE) {
-    String* name;
-    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
-    if (!maybe_name->To<String>(&name)) return maybe_name;
-    SharedFunctionInfo* shared;
-    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
-    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
-    JSFunction* func;
-    MaybeObject* maybe_func =
-        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
-    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
-    func->set_context(isolate()->context()->global_context());
+    map->set_function_with_prototype(true);
+    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+    JSFunction::cast(object)->set_context(
+        isolate()->context()->global_context());
   }
 
   // Put in filler if the new object is smaller than the old.
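The "must NOT fail after this point" comment above encodes an allocate-first, commit-later discipline: every fallible allocation happens before the object's map (and thus its type) is overwritten, because a failure after the type change could not be retried safely. A sketch of the pattern under toy assumptions:

    #include <optional>
    #include <string>

    struct ToyParts { std::string trap_name; };

    // All fallible work is done up front; returning nullopt here is safe to
    // retry because nothing has been mutated yet.
    static std::optional<ToyParts> AllocateParts() {
      return ToyParts{"<freezing call trap>"};
    }

    static bool Reinitialize(int* type_field, int new_type) {
      std::optional<ToyParts> parts = AllocateParts();
      if (!parts) return false;  // fail BEFORE the commit point
      *type_field = new_type;    // commit point: only infallible steps below
      // ... infallible initialization using *parts ...
      return true;
    }

    int main() {
      int type = 0;
      return Reinitialize(&type, 7) ? 0 : 1;
    }
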
@@ -3749,31 +4034,22 @@
   if (InNewSpace(string)) return NULL;
 
   // Find the corresponding symbol map for strings.
-  Map* map = string->map();
-  if (map == ascii_string_map()) {
-    return ascii_symbol_map();
+  switch (string->map()->instance_type()) {
+    case STRING_TYPE: return symbol_map();
+    case ASCII_STRING_TYPE: return ascii_symbol_map();
+    case CONS_STRING_TYPE: return cons_symbol_map();
+    case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
+    case EXTERNAL_STRING_TYPE: return external_symbol_map();
+    case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
+    case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+      return external_symbol_with_ascii_data_map();
+    case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
+    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+      return short_external_ascii_symbol_map();
+    case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+      return short_external_symbol_with_ascii_data_map();
+    default: return NULL;  // No match found.
   }
-  if (map == string_map()) {
-    return symbol_map();
-  }
-  if (map == cons_string_map()) {
-    return cons_symbol_map();
-  }
-  if (map == cons_ascii_string_map()) {
-    return cons_ascii_symbol_map();
-  }
-  if (map == external_string_map()) {
-    return external_symbol_map();
-  }
-  if (map == external_ascii_string_map()) {
-    return external_ascii_symbol_map();
-  }
-  if (map == external_string_with_ascii_data_map()) {
-    return external_symbol_with_ascii_data_map();
-  }
-
-  // No match found.
-  return NULL;
 }
 
 
@@ -3813,13 +4089,13 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<HeapObject*>(result)->set_map(map);
+  reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(chars);
@@ -3850,11 +4126,12 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -3863,7 +4140,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map(ascii_string_map());
+  HeapObject::cast(result)->set_map_unsafe(ascii_string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -3885,11 +4162,12 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -3898,7 +4176,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map(string_map());
+  HeapObject::cast(result)->set_map_unsafe(string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -3914,7 +4192,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
+  reinterpret_cast<FixedArray*>(result)->set_map_unsafe(fixed_array_map());
   reinterpret_cast<FixedArray*>(result)->set_length(0);
   return result;
 }
@@ -3931,7 +4209,7 @@
   int size = FixedArray::SizeFor(length);
   return size <= kMaxObjectSizeInNewSpace
       ? new_space_.AllocateRaw(size)
-      : lo_space_->AllocateRawFixedArray(size);
+      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
 }
 
 
@@ -3943,13 +4221,13 @@
   }
   if (InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    dst->set_map(map);
+    dst->set_map_unsafe(map);
     CopyBlock(dst->address() + kPointerSize,
               src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  HeapObject::cast(obj)->set_map(map);
+  HeapObject::cast(obj)->set_map_unsafe(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
@@ -3969,7 +4247,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   HeapObject* dst = HeapObject::cast(obj);
-  dst->set_map(map);
+  dst->set_map_unsafe(map);
   CopyBlock(
       dst->address() + FixedDoubleArray::kLengthOffset,
       src->address() + FixedDoubleArray::kLengthOffset,
@@ -3987,7 +4265,7 @@
   }
   // Initialize header.
   FixedArray* array = reinterpret_cast<FixedArray*>(result);
-  array->set_map(fixed_array_map());
+  array->set_map_unsafe(fixed_array_map());
   array->set_length(length);
   // Initialize body.
   ASSERT(!InNewSpace(undefined_value()));
@@ -4008,13 +4286,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_POINTER_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old pointer space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4035,7 +4313,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map(heap->fixed_array_map());
+  HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
   FixedArray* array = FixedArray::cast(result);
   array->set_length(length);
   MemsetPointer(array->data_start(), filler, length);
@@ -4068,7 +4346,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
+  reinterpret_cast<FixedArray*>(obj)->set_map_unsafe(fixed_array_map());
   FixedArray::cast(obj)->set_length(length);
   return obj;
 }
@@ -4082,7 +4360,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
+  reinterpret_cast<FixedDoubleArray*>(result)->set_map_unsafe(
       fixed_double_array_map());
   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
   return result;
@@ -4099,7 +4377,8 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
+  reinterpret_cast<FixedDoubleArray*>(obj)->set_map_unsafe(
+      fixed_double_array_map());
   FixedDoubleArray::cast(obj)->set_length(length);
   return obj;
 }
@@ -4118,13 +4397,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_DATA_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old data space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4135,7 +4414,7 @@
   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
+  reinterpret_cast<HeapObject*>(result)->set_map_unsafe(hash_table_map());
   ASSERT(result->IsHashTable());
   return result;
 }
@@ -4148,7 +4427,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(global_context_map());
+  context->set_map_unsafe(global_context_map());
   ASSERT(context->IsGlobalContext());
   ASSERT(result->IsContext());
   return result;
@@ -4162,7 +4441,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(function_context_map());
+  context->set_map_unsafe(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
   context->set_extension(NULL);
@@ -4182,7 +4461,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(catch_context_map());
+  context->set_map_unsafe(catch_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(name);
@@ -4200,7 +4479,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(with_context_map());
+  context->set_map_unsafe(with_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
@@ -4211,14 +4490,14 @@
 
 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                         Context* previous,
-                                        SerializedScopeInfo* scope_info) {
+                                        ScopeInfo* scope_info) {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
+        AllocateFixedArrayWithHoles(scope_info->ContextLength());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map(block_context_map());
+  context->set_map_unsafe(block_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(scope_info);
@@ -4227,14 +4506,11 @@
 }
 
 
-MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
-  Object* result;
-  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  SerializedScopeInfo* scope_info =
-      reinterpret_cast<SerializedScopeInfo*>(result);
-  scope_info->set_map(serialized_scope_info_map());
+MaybeObject* Heap::AllocateScopeInfo(int length) {
+  FixedArray* scope_info;
+  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
+  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
+  scope_info->set_map_unsafe(scope_info_map());
   return scope_info;
 }
 
@@ -4252,7 +4528,7 @@
   }
   int size = map->instance_size();
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4262,6 +4538,21 @@
 }
 
 
+bool Heap::IsHeapIterable() {
+  return (!old_pointer_space()->was_swept_conservatively() &&
+          !old_data_space()->was_swept_conservatively());
+}
+
+
+void Heap::EnsureHeapIsIterable() {
+  ASSERT(IsAllocationAllowed());
+  if (!IsHeapIterable()) {
+    CollectAllGarbage(kMakeHeapIterableMask);
+  }
+  ASSERT(IsHeapIterable());
+}
+
+
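EnsureHeapIsIterable above is a guard pattern: pages that were swept conservatively contain free blocks that cannot be parsed object by object, so iteration first forces a collection that leaves the spaces precisely swept. In toy form:

    struct ToySpace { bool swept_conservatively; };

    // Mirrors IsHeapIterable: object-by-object iteration is only safe once
    // neither old space was swept conservatively.
    static bool IsIterable(const ToySpace& old_pointer,
                           const ToySpace& old_data) {
      return !old_pointer.swept_conservatively &&
             !old_data.swept_conservatively;
    }

    static void EnsureIterable(ToySpace* old_pointer, ToySpace* old_data) {
      if (!IsIterable(*old_pointer, *old_data)) {
        // Stand-in for CollectAllGarbage(kMakeHeapIterableMask): a full,
        // precise sweep restores iterability.
        old_pointer->swept_conservatively = false;
        old_data->swept_conservatively = false;
      }
    }

    int main() {
      ToySpace p{true}, d{false};
      EnsureIterable(&p, &d);
      return IsIterable(p, d) ? 0 : 1;
    }
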
 bool Heap::IdleNotification() {
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
@@ -4292,7 +4583,7 @@
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
     if (contexts_disposed_ > 0) {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
     } else {
       CollectGarbage(NEW_SPACE);
     }
@@ -4304,12 +4595,12 @@
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(false);
+    CollectAllGarbage(kNoGCFlags);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(true);
+    CollectAllGarbage(kNoGCFlags);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
@@ -4319,7 +4610,7 @@
       contexts_disposed_ = 0;
     } else {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
       last_idle_notification_gc_count_ = gc_count_;
     }
     // If this is the first idle notification, we reset the
@@ -4339,8 +4630,11 @@
 
   // Make sure that we have no pending context disposals and
   // conditionally uncommit from space.
-  ASSERT(contexts_disposed_ == 0);
+  // Take into account that we might have decided to delay full collection
+  // because incremental marking is in progress.
+  ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
   if (uncommit) UncommitFromSpace();
+
   return finished;
 }
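
(Sketch, not part of the change.) Heap::IdleNotification escalates through the thresholds above: a scavenge (or, if contexts were disposed, a full collection) at kIdlesBeforeScavenge, a compilation-cache clear plus mark-sweep at kIdlesBeforeMarkSweep, and a final collection with a new-space shrink at kIdlesBeforeMarkCompact. A standalone model of the escalation; the value 8 for the third threshold is an assumption, since it is not visible in this hunk:

#include <cstdio>

// Model only; the real thresholds live in Heap::IdleNotification.
enum IdleAction { kNone, kScavenge, kMarkSweep, kFinalCollect };

static IdleAction OnIdleNotification(int* idles) {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;  // assumed value
  ++*idles;
  if (*idles == kIdlesBeforeScavenge) return kScavenge;
  if (*idles == kIdlesBeforeMarkSweep) return kMarkSweep;
  if (*idles >= kIdlesBeforeMarkCompact) {
    *idles = 0;  // the real code also resets the counter at this point
    return kFinalCollect;
  }
  return kNone;
}

int main() {
  int idles = 0;
  for (int i = 1; i <= 9; i++) {
    printf("notification %d -> action %d\n", i, OnIdleNotification(&idles));
  }
  return 0;
}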
 
@@ -4374,11 +4668,11 @@
   USE(title);
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
-  PrintF("mark-compact GC : %d\n", mc_count_);
   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_promotion_limit_);
   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_allocation_limit_);
+  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4455,69 +4749,18 @@
 
 
 #ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
-}
-
-
-static void VerifyPointersUnderWatermark(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address start = page->ObjectAreaStart();
-    Address end = page->AllocationWatermark();
-
-    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
-                              start,
-                              end,
-                              visit_dirty_region,
-                              &DummyScavengePointer);
-  }
-}
-
-
-static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy predicate point into
-        // the active semispace.
-        HEAP->InNewSpace(*slot);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-
-
 void Heap::Verify() {
   ASSERT(HasBeenSetup());
 
+  store_buffer()->Verify();
+
   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
 
   new_space_.Verify();
 
-  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
-  old_pointer_space_->Verify(&dirty_regions_visitor);
-  map_space_->Verify(&dirty_regions_visitor);
-
-  VerifyPointersUnderWatermark(old_pointer_space_,
-                               &IteratePointersInDirtyRegion);
-  VerifyPointersUnderWatermark(map_space_,
-                               &IteratePointersInDirtyMapsRegion);
-  VerifyPointersUnderWatermark(lo_space_);
-
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+  old_pointer_space_->Verify(&visitor);
+  map_space_->Verify(&visitor);
 
   VerifyPointersVisitor no_dirty_regions_visitor;
   old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4526,6 +4769,7 @@
 
   lo_space_->Verify();
 }
+
 #endif  // DEBUG
 
 
@@ -4621,277 +4865,223 @@
 
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
-  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
-  for (Address a = new_space_.FromSpaceLow();
-       a < new_space_.FromSpaceHigh();
-       a += kPointerSize) {
-    Memory::Address_at(a) = kFromSpaceZapValue;
+  NewSpacePageIterator it(new_space_.FromSpaceStart(),
+                          new_space_.FromSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    for (Address cursor = page->area_start(), limit = page->area_end();
+         cursor < limit;
+         cursor += kPointerSize) {
+      Memory::Address_at(cursor) = kFromSpaceZapValue;
+    }
   }
 }
 #endif  // DEBUG
 
 
-bool Heap::IteratePointersInDirtyRegion(Heap* heap,
-                                        Address start,
-                                        Address end,
-                                        ObjectSlotCallback copy_object_func) {
-  Address slot_address = start;
-  bool pointers_to_new_space_found = false;
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap->InNewSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      copy_object_func(reinterpret_cast<HeapObject**>(slot));
-      if (heap->InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        pointers_to_new_space_found = true;
-      }
-    }
-    slot_address += kPointerSize;
-  }
-  return pointers_to_new_space_found;
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->ObjectAreaStart();
-  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
-  return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-static bool IteratePointersInDirtyMaps(Address start,
-                                       Address end,
-                                       ObjectSlotCallback copy_object_func) {
-  ASSERT(MapStartAlign(start) == start);
-  ASSERT(MapEndAlign(end) == end);
-
-  Address map_address = start;
-  bool pointers_to_new_space_found = false;
-
-  Heap* heap = HEAP;
-  while (map_address < end) {
-    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
-    ASSERT(Memory::Object_at(map_address)->IsMap());
-
-    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
-    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
-    if (Heap::IteratePointersInDirtyRegion(heap,
-                                           pointer_fields_start,
-                                           pointer_fields_end,
-                                           copy_object_func)) {
-      pointers_to_new_space_found = true;
-    }
-
-    map_address += Map::kSize;
-  }
-
-  return pointers_to_new_space_found;
-}
-
-
-bool Heap::IteratePointersInDirtyMapsRegion(
-    Heap* heap,
-    Address start,
-    Address end,
-    ObjectSlotCallback copy_object_func) {
-  Address map_aligned_start = MapStartAlign(start);
-  Address map_aligned_end   = MapEndAlign(end);
-
-  bool contains_pointers_to_new_space = false;
-
-  if (map_aligned_start != start) {
-    Address prev_map = map_aligned_start - Map::kSize;
-    ASSERT(Memory::Object_at(prev_map)->IsMap());
-
-    Address pointer_fields_start =
-        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
-
-    Address pointer_fields_end =
-        Min(prev_map + Map::kPointerFieldsEndOffset, end);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  contains_pointers_to_new_space =
-    IteratePointersInDirtyMaps(map_aligned_start,
-                               map_aligned_end,
-                               copy_object_func)
-      || contains_pointers_to_new_space;
-
-  if (map_aligned_end != end) {
-    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
-    Address pointer_fields_start =
-        map_aligned_end + Map::kPointerFieldsBeginOffset;
-
-    Address pointer_fields_end =
-        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  return contains_pointers_to_new_space;
-}
-
-
 void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                              Address end,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
-  Page* page = Page::FromAddress(start);
 
-  uint32_t marks = page->GetRegionMarks();
+  // We are not collecting slots on new space objects during mutation, thus
+  // we have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects. Grey
+  // objects' slots would be rescanned. A white object might not survive
+  // until the end of the collection, so recording its slots would violate
+  // the invariant.
+  bool record_slots = false;
+  if (incremental_marking()->IsCompacting()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    record_slots = Marking::IsBlack(mark_bit);
+  }
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (InFromSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      callback(reinterpret_cast<HeapObject**>(slot));
-      if (InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        marks |= page->GetRegionMaskForAddress(slot_address);
+    Object* object = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (object->IsHeapObject()) {
+      if (Heap::InFromSpace(object)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(object));
+        Object* new_object = *slot;
+        if (InNewSpace(new_object)) {
+          SLOW_ASSERT(Heap::InToSpace(new_object));
+          SLOW_ASSERT(new_object->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+        mark_compact_collector()->RecordSlot(slot, slot, object);
       }
     }
     slot_address += kPointerSize;
   }
-
-  page->SetRegionMarks(marks);
 }
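
(Sketch, not part of the change.) IterateAndMarkPointersToFromSpace now re-enters surviving old-to-new pointers directly into the store buffer instead of updating per-page region marks. A standalone model of that re-entry step, with plain integers standing in for tagged words and a vector standing in for the store buffer:

#include <cassert>
#include <cstdint>
#include <vector>

// Scan a promoted object's slot range; any slot that still points into
// new space after the copy callback ran must be remembered.
static void ScanPromotedSlots(uintptr_t** start, uintptr_t** end,
                              uintptr_t new_space_lo, uintptr_t new_space_hi,
                              std::vector<uintptr_t**>* store_buffer) {
  for (uintptr_t** slot = start; slot < end; slot++) {
    uintptr_t value = reinterpret_cast<uintptr_t>(*slot);
    if (value >= new_space_lo && value < new_space_hi) {
      store_buffer->push_back(slot);  // models EnterDirectlyIntoStoreBuffer
    }
  }
}

int main() {
  // Three slots whose values may or may not fall in [0x1000, 0x2000).
  uintptr_t* slots[3] = {
      reinterpret_cast<uintptr_t*>(0x1000),
      reinterpret_cast<uintptr_t*>(0x9000),
      reinterpret_cast<uintptr_t*>(0x1fff)};
  std::vector<uintptr_t**> store_buffer;
  ScanPromotedSlots(slots, slots + 3, 0x1000, 0x2000, &store_buffer);
  assert(store_buffer.size() == 2);  // slots 0 and 2 re-enter the buffer
  return 0;
}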
 
 
-uint32_t Heap::IterateDirtyRegions(
-    uint32_t marks,
-    Address area_start,
-    Address area_end,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func) {
-  uint32_t newmarks = 0;
-  uint32_t mask = 1;
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
 
-  if (area_start >= area_end) {
-    return newmarks;
-  }
 
-  Address region_start = area_start;
-
-  // area_start does not necessarily coincide with start of the first region.
-  // Thus to calculate the beginning of the next region we have to align
-  // area_start by Page::kRegionSize.
-  Address second_region =
-      reinterpret_cast<Address>(
-          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
-          ~Page::kRegionAlignmentMask);
-
-  // Next region might be beyond area_end.
-  Address region_end = Min(second_region, area_end);
-
-  if (marks & mask) {
-    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
-      newmarks |= mask;
-    }
-  }
-  mask <<= 1;
-
-  // Iterate subsequent regions which fully lay inside [area_start, area_end[.
-  region_start = region_end;
-  region_end = region_start + Page::kRegionSize;
-
-  while (region_end <= area_end) {
-    if (marks & mask) {
-      if (visit_dirty_region(this,
-                             region_start,
-                             region_end,
-                             copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-
-    region_start = region_end;
-    region_end = region_start + Page::kRegionSize;
-
-    mask <<= 1;
-  }
-
-  if (region_start != area_end) {
-    // A small piece of area left uniterated because area_end does not coincide
-    // with region end. Check whether region covering last part of area is
-    // dirty.
-    if (marks & mask) {
-      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-  }
-
-  return newmarks;
+bool IsAMapPointerAddress(Object** addr) {
+  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+  int mod = a % Map::kSize;
+  return mod >= Map::kPointerFieldsBeginOffset &&
+         mod < Map::kPointerFieldsEndOffset;
 }
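
(Sketch, not part of the change.) IsAMapPointerAddress works because map space packs Map objects back to back at Map::kSize intervals, so the offset of an address within its Map-sized cell is just a modulo. A worked standalone example with assumed constants; the real values of Map::kSize and the pointer-field offsets depend on the build:

#include <cassert>
#include <cstdint>

// Assumed stand-ins for Map::kSize and the pointer-field offsets.
static const uintptr_t kMapSize = 88;
static const uintptr_t kPointerFieldsBeginOffset = 8;
static const uintptr_t kPointerFieldsEndOffset = 40;

static bool IsAMapPointerAddressModel(uintptr_t addr) {
  uintptr_t mod = addr % kMapSize;
  return mod >= kPointerFieldsBeginOffset && mod < kPointerFieldsEndOffset;
}

int main() {
  assert(IsAMapPointerAddressModel(8));              // first map, first field
  assert(!IsAMapPointerAddressModel(40));            // past the last field
  assert(IsAMapPointerAddressModel(kMapSize + 16));  // second map in the row
  return 0;
}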
 
 
+bool EverythingsAPointer(Object** addr) {
+  return true;
+}
 
-void Heap::IterateDirtyRegions(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func,
-    ExpectedPageWatermarkState expected_page_watermark_state) {
 
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    uint32_t marks = page->GetRegionMarks();
-
-    if (marks != Page::kAllRegionsCleanMarks) {
-      Address start = page->ObjectAreaStart();
-
-      // Do not try to visit pointers beyond page allocation watermark.
-      // Page can contain garbage pointers there.
-      Address end;
-
-      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
-          page->IsWatermarkValid()) {
-        end = page->AllocationWatermark();
-      } else {
-        end = page->CachedAllocationWatermark();
-      }
-
-      ASSERT(space == old_pointer_space_ ||
-             (space == map_space_ &&
-              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
-      page->SetRegionMarks(IterateDirtyRegions(marks,
-                                               start,
-                                               end,
-                                               visit_dirty_region,
-                                               copy_object_func));
+static void CheckStoreBuffer(Heap* heap,
+                             Object** current,
+                             Object** limit,
+                             Object**** store_buffer_position,
+                             Object*** store_buffer_top,
+                             CheckStoreBufferFilter filter,
+                             Address special_garbage_start,
+                             Address special_garbage_end) {
+  Map* free_space_map = heap->free_space_map();
+  for ( ; current < limit; current++) {
+    Object* o = *current;
+    Address current_address = reinterpret_cast<Address>(current);
+    // Skip free space.
+    if (o == free_space_map) {
+      FreeSpace* free_space =
+          FreeSpace::cast(HeapObject::FromAddress(current_address));
+      int skip = free_space->Size();
+      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
+      ASSERT(skip > 0);
+      current_address += skip - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
     }
-
-    // Mark page watermark as invalid to maintain watermark validity invariant.
-    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
-    page->InvalidateWatermark(true);
+    // Skip the current linear allocation space between top and limit, which
+    // is not marked with the free space map but can contain junk.
+    if (current_address == special_garbage_start &&
+        special_garbage_end != special_garbage_start) {
+      current_address = special_garbage_end - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    if (!(*filter)(current)) continue;
+    ASSERT(current_address < special_garbage_start ||
+           current_address >= special_garbage_end);
+    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+    // We have to check that the pointer does not point into new space
+    // without trying to cast it to a heap object since the hash field of
+    // a string can contain values like 1 and 3 which are tagged null
+    // pointers.
+    if (!heap->InNewSpace(o)) continue;
+    while (**store_buffer_position < current &&
+           *store_buffer_position < store_buffer_top) {
+      (*store_buffer_position)++;
+    }
+    if (**store_buffer_position != current ||
+        *store_buffer_position == store_buffer_top) {
+      Object** obj_start = current;
+      while (!(*obj_start)->IsMap()) obj_start--;
+      UNREACHABLE();
+    }
   }
 }
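
(Sketch, not part of the change.) Because the store buffer is sorted and deduplicated first (SortUniq), CheckStoreBuffer can verify it with a single forward-moving cursor: the page scan yields old-to-new slots in ascending address order, and every one of them must appear in the buffer. A standalone model with ints standing in for slot addresses:

#include <algorithm>
#include <cassert>
#include <vector>

static void CheckAllSlotsRecorded(const std::vector<int>& slots_from_scan,
                                  std::vector<int> store_buffer) {
  std::sort(store_buffer.begin(), store_buffer.end());  // models SortUniq()
  size_t pos = 0;
  for (int slot : slots_from_scan) {
    while (pos < store_buffer.size() && store_buffer[pos] < slot) pos++;
    // A missing entry corresponds to the UNREACHABLE() above.
    assert(pos < store_buffer.size() && store_buffer[pos] == slot);
  }
}

int main() {
  CheckAllSlotsRecorded({16, 48}, {48, 16, 99});  // passes
  return 0;
}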
 
 
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+  OldSpace* space = old_pointer_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &EverythingsAPointer,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer() {
+  MapSpace* space = map_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &IsAMapPointerAddress,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+  LargeObjectIterator it(lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Object*** store_buffer_position = store_buffer()->Start();
+      Object*** store_buffer_top = store_buffer()->Top();
+      Object** current = reinterpret_cast<Object**>(object->address());
+      Object** limit =
+          reinterpret_cast<Object**>(object->address() + object->Size());
+      CheckStoreBuffer(this,
+                       current,
+                       limit,
+                       &store_buffer_position,
+                       store_buffer_top,
+                       &EverythingsAPointer,
+                       NULL,
+                       NULL);
+    }
+  }
+}
+#endif
+
+
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
@@ -4941,8 +5131,7 @@
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE &&
-      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
   v->Synchronize("builtins");
@@ -4986,11 +5175,20 @@
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
 bool Heap::ConfigureHeap(int max_semispace_size,
-                         int max_old_gen_size,
-                         int max_executable_size) {
+                         intptr_t max_old_gen_size,
+                         intptr_t max_executable_size) {
   if (HasBeenSetup()) return false;
 
-  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+  if (max_semispace_size > 0) {
+    if (max_semispace_size < Page::kPageSize) {
+      max_semispace_size = Page::kPageSize;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be less than %dkbytes\n",
+               Page::kPageSize >> 10);
+      }
+    }
+    max_semispace_size_ = max_semispace_size;
+  }
 
   if (Snapshot::IsEnabled()) {
     // If we are using a snapshot we always reserve the default amount
@@ -5000,6 +5198,10 @@
     // than the default reserved semispace size.
     if (max_semispace_size_ > reserved_semispace_size_) {
       max_semispace_size_ = reserved_semispace_size_;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be more than %dkbytes\n",
+               reserved_semispace_size_ >> 10);
+      }
     }
   } else {
     // If we are not using snapshots we reserve space for the actual
@@ -5025,8 +5227,12 @@
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
   external_allocation_limit_ = 10 * max_semispace_size_;
 
-  // The old generation is paged.
-  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
+                                                       Page::kPageSize),
+                                 RoundUp(max_old_generation_size_,
+                                         Page::kPageSize));
 
   configured_ = true;
   return true;
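
(Sketch, not part of the change.) The Max() above guarantees that the configured old generation can hold at least one page per paged space; otherwise a tiny --max-old-space-size could leave some space with no page at all. A worked example with assumed values of Page::kPageSize = 1 MB and five paged spaces:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kPageSize = 1 << 20;    // assumed
  const intptr_t paged_space_count = 5;  // assumed
  intptr_t requested = 2 * kPageSize;    // user asked for 2 MB
  intptr_t rounded = ((requested + kPageSize - 1) / kPageSize) * kPageSize;
  intptr_t clamped = std::max(paged_space_count * kPageSize, rounded);
  assert(clamped == 5 * kPageSize);      // raised to one page per space
  return 0;
}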
@@ -5034,9 +5240,9 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
-                       FLAG_max_old_space_size * MB,
-                       FLAG_max_executable_size * MB);
+  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
+                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
+                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
 }
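
(Sketch, not part of the change.) The casts matter because the flags are plain ints: FLAG_max_old_space_size * MB is evaluated in 32-bit arithmetic and overflows for values of 2048 MB and up, even on a 64-bit build. Widening before the multiply keeps the product exact. A standalone demonstration, with int64_t standing in for intptr_t on a 64-bit target:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t MB = 1 << 20;
  int flag_mb = 4096;  // e.g. --max-old-space-size=4096
  // int bad = flag_mb * (1 << 20);  // would be undefined signed overflow
  int64_t good = static_cast<int64_t>(flag_mb) * MB;
  assert(good == 4096LL * 1024 * 1024);
  return 0;
}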
 
 
@@ -5064,7 +5270,7 @@
   *stats->os_error = OS::GetLastError();
       isolate()->memory_allocator()->Available();
   if (take_snapshot) {
-    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+    HeapIterator iterator;
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -5262,6 +5468,7 @@
 
 bool Heap::Setup(bool create_heap_objects) {
 #ifdef DEBUG
+  allocation_timeout_ = FLAG_gc_interval;
   debug_utils_ = new HeapDebugUtils(this);
 #endif
 
@@ -5280,31 +5487,21 @@
   gc_initializer_mutex->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
     initialized_gc = true;
     InitializeScavengingVisitorsTables();
     NewSpaceScavenger::Initialize();
     MarkCompactCollector::Initialize();
   }
   gc_initializer_mutex->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
-  // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the requested reserved
-  // new space size to ensure that we can find a pair of semispaces that
-  // are contiguous and aligned to their size.
+  // Setup memory allocator.
   if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
       return false;
-  void* chunk =
-      isolate_->memory_allocator()->ReserveInitialChunk(
-          4 * reserved_semispace_size_);
-  if (chunk == NULL) return false;
 
-  // Align the pair of semispaces to their size, which must be a power
-  // of 2.
-  Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
-  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+  // Setup new space.
+  if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
     return false;
   }
 
@@ -5315,7 +5512,7 @@
                    OLD_POINTER_SPACE,
                    NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->Setup(NULL, 0)) return false;
+  if (!old_pointer_space_->Setup()) return false;
 
   // Initialize old data space.
   old_data_space_ =
@@ -5324,7 +5521,7 @@
                    OLD_DATA_SPACE,
                    NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->Setup(NULL, 0)) return false;
+  if (!old_data_space_->Setup()) return false;
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
@@ -5339,40 +5536,27 @@
   code_space_ =
       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
-  if (!code_space_->Setup(NULL, 0)) return false;
+  if (!code_space_->Setup()) return false;
 
   // Initialize map space.
-  map_space_ = new MapSpace(this, FLAG_use_big_map_space
-      ? max_old_generation_size_
-      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
-      FLAG_max_map_space_pages,
-      MAP_SPACE);
+  map_space_ = new MapSpace(this,
+                            max_old_generation_size_,
+                            FLAG_max_map_space_pages,
+                            MAP_SPACE);
   if (map_space_ == NULL) return false;
-  if (!map_space_->Setup(NULL, 0)) return false;
+  if (!map_space_->Setup()) return false;
 
   // Initialize global property cell space.
   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
-  if (!cell_space_->Setup(NULL, 0)) return false;
+  if (!cell_space_->Setup()) return false;
 
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
+  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
   if (lo_space_ == NULL) return false;
   if (!lo_space_->Setup()) return false;
-
-  // Setup the seed that is used to randomize the string hash function.
-  ASSERT(hash_seed() == 0);
-  if (FLAG_randomize_hashes) {
-    if (FLAG_hash_seed == 0) {
-      set_hash_seed(
-          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
-    } else {
-      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
-    }
-  }
-
   if (create_heap_objects) {
     // Create initial maps.
     if (!CreateInitialMaps()) return false;
@@ -5387,6 +5571,8 @@
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
+  store_buffer()->Setup();
+
   return true;
 }
 
@@ -5413,7 +5599,6 @@
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("mark_compact_count=%d ", mc_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5463,6 +5648,9 @@
     lo_space_ = NULL;
   }
 
+  store_buffer()->TearDown();
+  incremental_marking()->TearDown();
+
   isolate_->memory_allocator()->TearDown();
 
 #ifdef DEBUG
@@ -5475,8 +5663,11 @@
 void Heap::Shrink() {
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->Shrink();
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->ReleaseAllUnusedPages();
+  }
 }
 
 
@@ -5679,98 +5870,54 @@
 };
 
 
-class FreeListNodesFilter : public HeapObjectsFilter {
- public:
-  FreeListNodesFilter() {
-    MarkFreeListNodes();
-  }
-
-  bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
-      return true;
-    } else {
-      return false;
-    }
-  }
-
- private:
-  void MarkFreeListNodes() {
-    Heap* heap = HEAP;
-    heap->old_pointer_space()->MarkFreeListNodes();
-    heap->old_data_space()->MarkFreeListNodes();
-    MarkCodeSpaceFreeListNodes(heap);
-    heap->map_space()->MarkFreeListNodes();
-    heap->cell_space()->MarkFreeListNodes();
-  }
-
-  void MarkCodeSpaceFreeListNodes(Heap* heap) {
-    // For code space, using FreeListNode::IsFreeListNode is OK.
-    HeapObjectIterator iter(heap->code_space());
-    for (HeapObject* obj = iter.next_object();
-         obj != NULL;
-         obj = iter.next_object()) {
-      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
-    }
-  }
-
-  AssertNoAllocation no_alloc;
-};
-
-
 class UnreachableObjectsFilter : public HeapObjectsFilter {
  public:
   UnreachableObjectsFilter() {
-    MarkUnreachableObjects();
+    MarkReachableObjects();
+  }
+
+  ~UnreachableObjectsFilter() {
+    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
   }
 
   bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
-      return true;
-    } else {
-      return false;
-    }
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    return !mark_bit.Get();
   }
 
  private:
-  class UnmarkingVisitor : public ObjectVisitor {
+  class MarkingVisitor : public ObjectVisitor {
    public:
-    UnmarkingVisitor() : list_(10) {}
+    MarkingVisitor() : marking_stack_(10) {}
 
     void VisitPointers(Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
-        if (obj->IsMarked()) {
-          obj->ClearMark();
-          list_.Add(obj);
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        if (!mark_bit.Get()) {
+          mark_bit.Set();
+          marking_stack_.Add(obj);
         }
       }
     }
 
-    bool can_process() { return !list_.is_empty(); }
-
-    void ProcessNext() {
-      HeapObject* obj = list_.RemoveLast();
-      obj->Iterate(this);
+    void TransitiveClosure() {
+      while (!marking_stack_.is_empty()) {
+        HeapObject* obj = marking_stack_.RemoveLast();
+        obj->Iterate(this);
+      }
     }
 
    private:
-    List<HeapObject*> list_;
+    List<HeapObject*> marking_stack_;
   };
 
-  void MarkUnreachableObjects() {
-    HeapIterator iterator;
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
-         obj = iterator.next()) {
-      obj->SetMark();
-    }
-    UnmarkingVisitor visitor;
-    HEAP->IterateRoots(&visitor, VISIT_ALL);
-    while (visitor.can_process())
-      visitor.ProcessNext();
+  void MarkReachableObjects() {
+    Heap* heap = Isolate::Current()->heap();
+    MarkingVisitor visitor;
+    heap->IterateRoots(&visitor, VISIT_ALL);
+    visitor.TransitiveClosure();
   }
 
   AssertNoAllocation no_alloc;
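
(Sketch, not part of the change.) UnreachableObjectsFilter now works positively: it marks everything reachable from the roots via an explicit marking stack, SkipObject then reduces to "mark bit not set", and the destructor clears all mark bits again. A standalone model of the transitive closure over a small object graph:

#include <cassert>
#include <vector>

// Objects are indices; edges[i] lists the objects i points to.
static std::vector<bool> MarkReachable(
    const std::vector<std::vector<int> >& edges,
    const std::vector<int>& roots) {
  std::vector<bool> marked(edges.size(), false);
  std::vector<int> marking_stack;
  for (int root : roots) {
    if (!marked[root]) {
      marked[root] = true;
      marking_stack.push_back(root);
    }
  }
  while (!marking_stack.empty()) {  // models TransitiveClosure
    int obj = marking_stack.back();
    marking_stack.pop_back();
    for (int child : edges[obj]) {
      if (!marked[child]) {         // the MarkBit test above
        marked[child] = true;
        marking_stack.push_back(child);
      }
    }
  }
  return marked;                    // SkipObject(o) == !marked[o]
}

int main() {
  // 0 -> 1 -> 2; object 3 is unreachable.
  std::vector<bool> m = MarkReachable({{1}, {2}, {}, {}}, {0});
  assert(m[0] && m[1] && m[2] && !m[3]);
  return 0;
}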
@@ -5798,12 +5945,8 @@
 
 void HeapIterator::Init() {
   // Start the iteration.
-  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
-      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+  space_iterator_ = new SpaceIterator;
   switch (filtering_) {
-    case kFilterFreeListNodes:
-      filter_ = new FreeListNodesFilter;
-      break;
     case kFilterUnreachable:
       filter_ = new UnreachableObjectsFilter;
       break;
@@ -5939,6 +6082,11 @@
 }
 
 
+static bool SafeIsGlobalContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+}
+
+
 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   if (!(*p)->IsHeapObject()) return;
 
@@ -5957,7 +6105,7 @@
     return;
   }
 
-  bool is_global_context = obj->IsGlobalContext();
+  bool is_global_context = SafeIsGlobalContext(obj);
 
   // not visited yet
   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6065,7 +6213,7 @@
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
-    holes_size += space->Waste() + space->AvailableFree();
+    holes_size += space->Waste() + space->Available();
   }
   return holes_size;
 }
@@ -6076,17 +6224,10 @@
       start_size_(0),
       gc_count_(0),
       full_gc_count_(0),
-      is_compacting_(false),
-      marked_count_(0),
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
       heap_(heap) {
-  // These two fields reflect the state of the previous full collection.
-  // Set them before they are changed by the collector.
-  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
-  previous_marked_count_ =
-      heap_->mark_compact_collector_.previous_marked_count();
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
   start_size_ = heap_->SizeOfObjects();
@@ -6103,6 +6244,14 @@
   if (heap_->last_gc_end_timestamp_ > 0) {
     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
   }
+
+  steps_count_ = heap_->incremental_marking()->steps_count();
+  steps_took_ = heap_->incremental_marking()->steps_took();
+  longest_step_ = heap_->incremental_marking()->longest_step();
+  steps_count_since_last_gc_ =
+      heap_->incremental_marking()->steps_count_since_last_gc();
+  steps_took_since_last_gc_ =
+      heap_->incremental_marking()->steps_took_since_last_gc();
 }
 
 
@@ -6137,7 +6286,21 @@
            SizeOfHeapObjects());
 
     if (external_time > 0) PrintF("%d / ", external_time);
-    PrintF("%d ms.\n", time);
+    PrintF("%d ms", time);
+    if (steps_count_ > 0) {
+      if (collector_ == SCAVENGER) {
+        PrintF(" (+ %d ms in %d steps since last GC)",
+               static_cast<int>(steps_took_since_last_gc_),
+               steps_count_since_last_gc_);
+      } else {
+        PrintF(" (+ %d ms in %d steps since start of marking, "
+                   "biggest step %f ms)",
+               static_cast<int>(steps_took_),
+               steps_count_,
+               longest_step_);
+      }
+    }
+    PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
     PrintF("mutator=%d ",
@@ -6149,8 +6312,7 @@
         PrintF("s");
         break;
       case MARK_COMPACTOR:
-        PrintF("%s",
-               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
+        PrintF("ms");
         break;
       default:
         UNREACHABLE();
@@ -6161,7 +6323,19 @@
     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
-    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+    PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
+    PrintF("new_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
+    PrintF("root_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
+    PrintF("old_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
+    PrintF("compaction_ptrs=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
+    PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
+        Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
+    PrintF("misc_compaction=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
 
     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
@@ -6172,6 +6346,14 @@
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
+    if (collector_ == SCAVENGER) {
+      PrintF("stepscount=%d ", steps_count_since_last_gc_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+    } else {
+      PrintF("stepscount=%d ", steps_count_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+    }
+
     PrintF("\n");
   }
 
@@ -6184,8 +6366,7 @@
     case SCAVENGER:
       return "Scavenge";
     case MARK_COMPACTOR:
-      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
-                                                           : "Mark-sweep";
+      return "Mark-sweep";
   }
   return "Unknown GC";
 }
@@ -6267,7 +6448,9 @@
 void ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+      continue;
+    }
     if (heap_->InNewSpace(new_space_strings_[i])) {
       new_space_strings_[last++] = new_space_strings_[i];
     } else {
@@ -6277,12 +6460,16 @@
   new_space_strings_.Rewind(last);
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
+    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
+      continue;
+    }
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
     old_space_strings_[last++] = old_space_strings_[i];
   }
   old_space_strings_.Rewind(last);
-  Verify();
+  if (FLAG_verify_heap) {
+    Verify();
+  }
 }
 
 
@@ -6292,4 +6479,53 @@
 }
 
 
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ == NULL) return;
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+    if (chunk->owner()->identity() == LO_SPACE) {
+      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion, it will fail to find the chunk because
+      // it tries to perform a search in the list of pages owned by the large
+      // object space, and queued chunks were detached from that list.
+      // To work around this we split the large chunk into normal kPageSize
+      // aligned pieces and initialize the size, owner and flags fields of
+      // every piece.
+      // If FromAnyPointerAddress encounters a slot that belongs to one of
+      // these smaller pieces it will treat it as a slot on a normal Page.
+      MemoryChunk* inner = MemoryChunk::FromAddress(
+          chunk->address() + Page::kPageSize);
+      MemoryChunk* inner_last = MemoryChunk::FromAddress(
+          chunk->address() + chunk->size() - 1);
+      while (inner <= inner_last) {
+        // Size of a large chunk is always a multiple of
+        // OS::AllocateAlignment() so there is always
+        // enough space for a fake MemoryChunk header.
+        inner->set_size(Page::kPageSize);
+        inner->set_owner(lo_space());
+        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+        inner = MemoryChunk::FromAddress(
+            inner->address() + Page::kPageSize);
+      }
+    }
+  }
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    isolate_->memory_allocator()->Free(chunk);
+  }
+  chunks_queued_for_free_ = NULL;
+}
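
(Sketch, not part of the change.) A standalone model of the splitting work-around described in the comment above: stamping a minimal header at every interior page boundary of a large chunk lets a page-granular address-to-chunk lookup still resolve interior addresses while the chunk waits to be freed. All types and the page size are stand-ins, not V8's MemoryChunk:

#include <cstdint>
#include <cstring>
#include <vector>

struct FakeChunkHeader {
  intptr_t size;
  int owner_identity;  // LO_SPACE in the real code
  unsigned flags;      // ABOUT_TO_BE_FREED in the real code
};

static const intptr_t kPageSize = 1 << 20;  // assumed

// Stamp a header at each interior page boundary of a large chunk.
static void StampInnerHeaders(char* chunk, intptr_t chunk_size,
                              int lo_space_id, unsigned about_to_be_freed) {
  for (char* p = chunk + kPageSize; p <= chunk + chunk_size - 1;
       p += kPageSize) {
    FakeChunkHeader header = {kPageSize, lo_space_id, about_to_be_freed};
    std::memcpy(p, &header, sizeof(header));  // header lives in-place
  }
}

int main() {
  std::vector<char> large(3 * kPageSize);
  StampInnerHeaders(large.data(), static_cast<intptr_t>(large.size()), 1, 4);
  return 0;
}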
+
 } }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
index b1948a9..1864f7b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -32,11 +32,15 @@
 
 #include "allocation.h"
 #include "globals.h"
+#include "incremental-marking.h"
 #include "list.h"
 #include "mark-compact.h"
+#include "objects-visiting.h"
 #include "spaces.h"
 #include "splay-tree-inl.h"
+#include "store-buffer.h"
 #include "v8-counters.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -48,30 +52,43 @@
 
 
 // Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V)                                      \
-  /* Put the byte array map early.  We need it to be in place by the time   */ \
-  /* the deserializer hits the next page, since it wants to put a byte      */ \
-  /* array in the unused space at the end of the page.                      */ \
+#define STRONG_ROOT_LIST(V)                                                    \
   V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, free_space_map, FreeSpaceMap)                                         \
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
-  V(Object, undefined_value, UndefinedValue)                                   \
-  V(Object, the_hole_value, TheHoleValue)                                      \
-  V(Object, null_value, NullValue)                                             \
-  V(Object, true_value, TrueValue)                                             \
-  V(Object, false_value, FalseValue)                                           \
-  V(Object, arguments_marker, ArgumentsMarker)                                 \
+  V(Smi, store_buffer_top, StoreBufferTop)                                     \
+  V(Oddball, undefined_value, UndefinedValue)                                  \
+  V(Oddball, the_hole_value, TheHoleValue)                                     \
+  V(Oddball, null_value, NullValue)                                            \
+  V(Oddball, true_value, TrueValue)                                            \
+  V(Oddball, false_value, FalseValue)                                          \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, meta_map, MetaMap)                                                    \
+  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
+  V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, global_context_map, GlobalContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
-  V(Map, serialized_scope_info_map, SerializedScopeInfoMap)                    \
+  V(Map, code_map, CodeMap)                                                    \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
   V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
   V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
-  V(Map, meta_map, MetaMap)                                                    \
   V(Map, hash_table_map, HashTableMap)                                         \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray)         \
+  V(String, empty_string, EmptyString)                                         \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Smi, stack_limit, StackLimit)                                              \
+  V(Oddball, frame_alignment_marker, FrameAlignmentMarker)                     \
+  V(Oddball, arguments_marker, ArgumentsMarker)                                \
+  /* The first 32 roots above this line should be boring from a GC point of */ \
+  /* view.  This means they are never in new space and never on a page that */ \
+  /* is being compacted.                                                    */ \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -79,20 +96,12 @@
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
   V(FixedArray, string_split_cache, StringSplitCache)                          \
   V(Object, termination_exception, TerminationException)                       \
-  V(Smi, hash_seed, HashSeed)                                                  \
-  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
-  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray)         \
-  V(String, empty_string, EmptyString)                                         \
-  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Map, string_map, StringMap)                                                \
-  V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, symbol_map, SymbolMap)                                                \
   V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
   V(Map, sliced_string_map, SlicedStringMap)                                   \
   V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
-  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
   V(Map, cons_symbol_map, ConsSymbolMap)                                       \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap)                            \
   V(Map, external_symbol_map, ExternalSymbolMap)                               \
@@ -101,6 +110,16 @@
   V(Map, external_string_map, ExternalStringMap)                               \
   V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap)  \
   V(Map, external_ascii_string_map, ExternalAsciiStringMap)                    \
+  V(Map, short_external_symbol_map, ShortExternalSymbolMap)                    \
+  V(Map,                                                                       \
+    short_external_symbol_with_ascii_data_map,                                 \
+    ShortExternalSymbolWithAsciiDataMap)                                       \
+  V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap)         \
+  V(Map, short_external_string_map, ShortExternalStringMap)                    \
+  V(Map,                                                                       \
+    short_external_string_with_ascii_data_map,                                 \
+    ShortExternalStringWithAsciiDataMap)                                       \
+  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap)         \
   V(Map, undetectable_string_map, UndetectableStringMap)                       \
   V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap)            \
   V(Map, external_pixel_array_map, ExternalPixelArrayMap)                      \
@@ -117,19 +136,17 @@
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, with_context_map, WithContextMap)                                     \
   V(Map, block_context_map, BlockContextMap)                                   \
-  V(Map, code_map, CodeMap)                                                    \
   V(Map, oddball_map, OddballMap)                                              \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
-  V(Object, nan_value, NanValue)                                               \
-  V(Object, minus_zero_value, MinusZeroValue)                                  \
+  V(HeapNumber, nan_value, NanValue)                                           \
+  V(HeapNumber, infinity_value, InfinityValue)                                 \
+  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
   V(Map, neander_map, NeanderMap)                                              \
   V(JSObject, message_listeners, MessageListeners)                             \
   V(Foreign, prototype_accessors, PrototypeAccessors)                          \
-  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
-  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
+  V(NumberDictionary, code_stubs, CodeStubs)                                   \
+  V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache)              \
   V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
@@ -227,7 +244,9 @@
   V(closure_symbol, "(closure)")                                         \
   V(use_strict, "use strict")                                            \
   V(dot_symbol, ".")                                                     \
-  V(anonymous_function_symbol, "(anonymous function)")
+  V(anonymous_function_symbol, "(anonymous function)")                   \
+  V(infinity_symbol, "Infinity")                                         \
+  V(minus_infinity_symbol, "-Infinity")
 
 // Forward declarations.
 class GCTracer;
@@ -239,10 +258,26 @@
 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                       Object** pointer);
 
-typedef bool (*DirtyRegionCallback)(Heap* heap,
-                                    Address start,
-                                    Address end,
-                                    ObjectSlotCallback copy_object_func);
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {
+  }
+
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space.  If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
+};
+
 
 
 // The all static Heap captures the interface to the global object heap.
@@ -257,32 +292,103 @@
 // by its size to avoid dereferencing a map pointer for scanning.
 class PromotionQueue {
  public:
-  PromotionQueue() : front_(NULL), rear_(NULL) { }
+  explicit PromotionQueue(Heap* heap)
+      : front_(NULL),
+        rear_(NULL),
+        limit_(NULL),
+        emergency_stack_(0),
+        heap_(heap) { }
 
-  void Initialize(Address start_address) {
-    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+  void Initialize();
+
+  void Destroy() {
+    ASSERT(is_empty());
+    delete emergency_stack_;
+    emergency_stack_ = NULL;
   }
 
-  bool is_empty() { return front_ <= rear_; }
+  inline void ActivateGuardIfOnTheSamePage();
+
+  Page* GetHeadPage() {
+    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  }
+
+  void SetNewLimit(Address limit) {
+    if (!guard_) {
+      return;
+    }
+
+    ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+    limit_ = reinterpret_cast<intptr_t*>(limit);
+
+    if (limit_ <= rear_) {
+      return;
+    }
+
+    RelocateQueueHead();
+  }
+
+  bool is_empty() {
+    return (front_ == rear_) &&
+        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+  }
 
   inline void insert(HeapObject* target, int size);
 
   void remove(HeapObject** target, int* size) {
+    ASSERT(!is_empty());
+    if (front_ == rear_) {
+      Entry e = emergency_stack_->RemoveLast();
+      *target = e.obj_;
+      *size = e.size_;
+      return;
+    }
+
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      ASSERT(!front_page->prev_page()->is_anchor());
+      front_ =
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
+    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
-    ASSERT(front_ >= rear_);
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
   }
 
  private:
-  // The front of the queue is higher in memory than the rear.
+  // The front of the queue is higher in the memory page chain than the rear.
   intptr_t* front_;
   intptr_t* rear_;
+  intptr_t* limit_;
+
+  bool guard_;
+
+  static const int kEntrySizeInWords = 2;
+
+  struct Entry {
+    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+
+    HeapObject* obj_;
+    int size_;
+  };
+  List<Entry>* emergency_stack_;
+
+  Heap* heap_;
+
+  void RelocateQueueHead();
 
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
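
(Sketch, not part of the change.) The queue now shares a page with new-space allocation: entries fill the high end of the page that scavenge allocation fills from the low end, and when SetNewLimit detects the collision the pending entries spill to emergency_stack_ via RelocateQueueHead. A standalone model with vectors standing in for both regions:

#include <cassert>
#include <utility>
#include <vector>

struct PromotionQueueModel {
  // in_page_ models the region between rear_ and front_;
  // emergency_ models emergency_stack_.
  std::vector<std::pair<int, int> > in_page_;
  std::vector<std::pair<int, int> > emergency_;

  void insert(int object, int size) { in_page_.push_back({object, size}); }

  // Called when the allocation limit would overwrite the queue.
  void RelocateQueueHead() {
    emergency_.insert(emergency_.end(), in_page_.begin(), in_page_.end());
    in_page_.clear();
  }

  bool is_empty() const { return in_page_.empty() && emergency_.empty(); }

  std::pair<int, int> remove() {
    if (in_page_.empty()) {  // front_ == rear_ in the real code
      std::pair<int, int> e = emergency_.back();
      emergency_.pop_back();
      return e;
    }
    std::pair<int, int> e = in_page_.back();
    in_page_.pop_back();
    return e;
  }
};

int main() {
  PromotionQueueModel q;
  q.insert(1, 16);
  q.insert(2, 32);
  q.RelocateQueueHead();          // allocation caught up with the queue
  q.insert(3, 8);
  assert(q.remove().first == 3);  // in-page entries drain first
  assert(q.remove().first == 2);
  assert(q.remove().first == 1);
  assert(q.is_empty());
  return 0;
}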
 
 
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
 // External strings table is a place where all external strings are
 // registered.  We need to keep track of such strings to properly
 // finalize them.
@@ -328,8 +434,8 @@
   // Configure heap size before setup. Return false if the heap has been
   // setup already.
   bool ConfigureHeap(int max_semispace_size,
-                     int max_old_gen_size,
-                     int max_executable_size);
+                     intptr_t max_old_gen_size,
+                     intptr_t max_executable_size);
   bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
@@ -377,9 +483,6 @@
   // all available bytes. Check MaxHeapObjectSize() instead.
   intptr_t Available();
 
-  // Returns the maximum object size in paged space.
-  inline int MaxObjectSizeInPagedSpace();
-
   // Returns the size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
@@ -457,6 +560,7 @@
   // size, but keeping the original prototype.  The receiver must have at least
   // the size of the new object.  The object is reinitialized and behaves as an
   // object that has been freshly allocated.
+  // Returns a failure if an error occurred, otherwise the object.
   MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
                                                       InstanceType type,
                                                       int size);
@@ -485,8 +589,10 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
-                                           int instance_size);
+  MUST_USE_RESULT MaybeObject* AllocateMap(
+      InstanceType instance_type,
+      int instance_size,
+      ElementsKind elements_kind = FAST_ELEMENTS);
 
   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -499,7 +605,7 @@
   MUST_USE_RESULT MaybeObject* AllocateCodeCache();
 
   // Allocates a serialized scope info.
-  MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
+  MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
 
   // Allocates an empty PolymorphicCodeCache.
   MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -689,7 +795,7 @@
   // Allocate a block context.
   MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
                                                     Context* previous,
-                                                    SerializedScopeInfo* info);
+                                                    ScopeInfo* info);
 
   // Allocates a new utility object in the old generation.
   MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -738,13 +844,15 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
+      int32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Converts an int into either a Smi or a HeapNumber object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
+      uint32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -797,9 +905,9 @@
   // failed.
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Finalizes an external string by deleting the associated external
   // data and clearing the resource pointer.
@@ -842,7 +950,8 @@
   // Please note this function does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
   MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
-  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
+  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(
+      Vector<const uc16> str);
   MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
     return LookupSymbol(CStrVector(str));
   }
@@ -885,13 +994,24 @@
   // collect more garbage.
   inline bool CollectGarbage(AllocationSpace space);
 
-  // Performs a full garbage collection. Force compaction if the
-  // parameter is true.
-  void CollectAllGarbage(bool force_compaction);
+  static const int kNoGCFlags = 0;
+  static const int kMakeHeapIterableMask = 1;
+
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over all objects.
+  void CollectAllGarbage(int flags);
 
   // Last hope GC, should try to squeeze as much as possible.
   void CollectAllAvailableGarbage();
 
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void EnsureHeapIsIterable();
+
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed() { return ++contexts_disposed_; }
 
@@ -899,6 +1019,20 @@
   // ensure correct callback for weak global handles.
   void PerformScavenge();
 
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
 #ifdef DEBUG
@@ -925,6 +1059,8 @@
 
   // Heap root getters.  We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
+  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
+  // not corrupt the stack.
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   type* name() {                                                               \
     return type::cast(roots_[k##camel_name##RootIndex]);                       \
@@ -958,6 +1094,9 @@
   }
   Object* global_contexts_list() { return global_contexts_list_; }
 
+  // Number of mark-sweeps.
+  int ms_count() { return ms_count_; }
+
   // Iterates over all roots in the heap.
   void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.
@@ -965,60 +1104,16 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
-  enum ExpectedPageWatermarkState {
-    WATERMARK_SHOULD_BE_VALID,
-    WATERMARK_CAN_BE_INVALID
-  };
-
-  // For each dirty region on a page in use from an old space call
-  // visit_dirty_region callback.
-  // If either visit_dirty_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  // All pages will be marked as having invalid watermark upon
-  // iteration completion.
-  void IterateDirtyRegions(
-      PagedSpace* space,
-      DirtyRegionCallback visit_dirty_region,
-      ObjectSlotCallback callback,
-      ExpectedPageWatermarkState expected_page_watermark_state);
-
-  // Interpret marks as a bitvector of dirty marks for regions of size
-  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
-  // memory interval from start to top. For each dirty region call a
-  // visit_dirty_region callback. Return updated bitvector of dirty marks.
-  uint32_t IterateDirtyRegions(uint32_t marks,
-                               Address start,
-                               Address end,
-                               DirtyRegionCallback visit_dirty_region,
-                               ObjectSlotCallback callback);
-
   // Iterate pointers to from semispace of new space found in memory interval
   // from start to end.
-  // Update dirty marks for page containing start address.
   void IterateAndMarkPointersToFromSpace(Address start,
                                          Address end,
                                          ObjectSlotCallback callback);
 
-  // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyRegion(Heap* heap,
-                                           Address start,
-                                           Address end,
-                                           ObjectSlotCallback callback);
-
-
-  // Iterate pointers to new space found in memory interval from start to end.
-  // This interval is considered to belong to the map space.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyMapsRegion(Heap* heap,
-                                               Address start,
-                                               Address end,
-                                               ObjectSlotCallback callback);
-
-
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address addr);
+  inline bool InNewSpacePage(Address addr);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
 
@@ -1037,7 +1132,7 @@
   inline AllocationSpace TargetSpaceId(InstanceType type);
 
   // Sets the stub_cache_ (only used when expanding the dictionary).
-  void public_set_code_stubs(UnseededNumberDictionary* value) {
+  void public_set_code_stubs(NumberDictionary* value) {
     roots_[kCodeStubsRootIndex] = value;
   }
 
@@ -1049,7 +1144,7 @@
   }
 
   // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
-  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
+  void public_set_non_monomorphic_cache(NumberDictionary* value) {
     roots_[kNonMonomorphicCacheRootIndex] = value;
   }
 
@@ -1057,11 +1152,19 @@
     roots_[kEmptyScriptRootIndex] = script;
   }
 
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
   // Update the next script id.
   inline void SetLastScriptId(Object* last_script_id);
 
   // Generated code can embed this address to get access to the roots.
-  Object** roots_address() { return roots_; }
+  Object** roots_array_start() { return roots_; }
+
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
 
   // Get address of global contexts list for serialization support.
   Object** global_contexts_list_address() {
@@ -1075,6 +1178,10 @@
   // Verify the heap is in its normal state before or after a GC.
   void Verify();
 
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
@@ -1170,22 +1277,55 @@
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
 
+  inline intptr_t PromotedTotalSize() {
+    return PromotedSpaceSize() + PromotedExternalMemorySize();
+  }
+
   // True if we have reached the allocation limit in the old generation that
   // should force the next GC (caused normally) to be a full one.
-  bool OldGenerationPromotionLimitReached() {
-    return (PromotedSpaceSize() + PromotedExternalMemorySize())
-           > old_gen_promotion_limit_;
+  inline bool OldGenerationPromotionLimitReached() {
+    return PromotedTotalSize() > old_gen_promotion_limit_;
   }
 
-  intptr_t OldGenerationSpaceAvailable() {
-    return old_gen_allocation_limit_ -
-           (PromotedSpaceSize() + PromotedExternalMemorySize());
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_gen_allocation_limit_ - PromotedTotalSize();
   }
 
-  // True if we have reached the allocation limit in the old generation that
-  // should artificially cause a GC right now.
-  bool OldGenerationAllocationLimitReached() {
-    return OldGenerationSpaceAvailable() < 0;
+  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
+  static const intptr_t kMinimumAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  // When we sweep lazily we initially guess that there is no garbage on the
+  // heap and set the limits for the next GC accordingly.  As we sweep we
+  // discover that some of the pages contained garbage, so we have to adjust
+  // the size of the heap downwards.  This means the limits that control the
+  // timing of the next GC also need to be adjusted downwards.
+  void LowerOldGenLimits(intptr_t adjustment) {
+    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+  }
+
+  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+    const int divisor = FLAG_stress_compaction ? 10 : 3;
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+    return Min(limit, halfway_to_the_max);
+  }
+
+  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+    const int divisor = FLAG_stress_compaction ? 8 : 2;
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+    return Min(limit, halfway_to_the_max);
   }
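
As a worked example of the formulas above (illustrative numbers, assuming 1 MB pages, a factor of 1, and the default divisor of 3): an old generation of 30 MB and a 2 MB new space give a promotion limit of min(30 + 30/3 + 2, halfway to the max) = 42 MB for any reasonable max. A standalone restatement of the same arithmetic:

// Standalone re-statement of OldGenPromotionLimit with illustrative
// constants; the minimum and divisor follow the definitions above.
#include <algorithm>
#include <cstdio>

long long PromotionLimit(long long old_gen_size, long long new_space_capacity,
                         long long max_old_gen, long long factor, int divisor,
                         long long minimum) {
  long long limit = std::max(old_gen_size + old_gen_size / divisor, minimum);
  limit += new_space_capacity;
  limit *= factor;
  long long halfway_to_the_max = (old_gen_size + max_old_gen) / 2;
  return std::min(limit, halfway_to_the_max);
}

int main() {
  const long long MB = 1024 * 1024;
  // 30 MB old gen, 2 MB new space, 512 MB cap, factor 1, divisor 3,
  // 5 MB minimum (5 pages of 1 MB, assumed): prints 42 MB.
  std::printf("%lld\n",
              PromotionLimit(30 * MB, 2 * MB, 512 * MB, 1, 3, 5 * MB));
  return 0;
}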
 
   // Can be called when the embedding application is idle.
@@ -1213,6 +1353,8 @@
 
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
+  MUST_USE_RESULT MaybeObject* Uint32ToString(
+      uint32_t value, bool check_number_string_cache = true);
 
   Map* MapForExternalArrayType(ExternalArrayType array_type);
   RootListIndex RootIndexForExternalArrayType(
@@ -1224,18 +1366,10 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
-  inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Optimized version of memmove for blocks with pointer size aligned sizes and
   // pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
 
-  inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
@@ -1244,9 +1378,31 @@
     survived_since_last_expansion_ += survived;
   }
 
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    intptr_t total_promoted = PromotedTotalSize();
+
+    intptr_t adjusted_promotion_limit =
+        old_gen_promotion_limit_ - new_space_.Capacity();
+
+    if (total_promoted >= adjusted_promotion_limit) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+
+    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
+
+
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
   void ProcessWeakReferences(WeakObjectRetainer* retainer);
 
   // Helper function that governs the promotion policy from new space to
@@ -1263,6 +1419,9 @@
 
   GCTracer* tracer() { return tracer_; }
 
+  // Returns the size of objects residing in non new spaces.
+  intptr_t PromotedSpaceSize();
+
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
     total_regexp_code_generated_ += size;
@@ -1281,6 +1440,18 @@
     return &mark_compact_collector_;
   }
 
+  StoreBuffer* store_buffer() {
+    return &store_buffer_;
+  }
+
+  Marking* marking() {
+    return &marking_;
+  }
+
+  IncrementalMarking* incremental_marking() {
+    return &incremental_marking_;
+  }
+
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -1291,22 +1462,31 @@
   }
 
   inline Isolate* isolate();
-  bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
-  void CallGlobalGCPrologueCallback() {
+  inline void CallGlobalGCPrologueCallback() {
     if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
   }
 
-  void CallGlobalGCEpilogueCallback() {
+  inline void CallGlobalGCEpilogueCallback() {
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
 
-  uint32_t HashSeed() {
-    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
-    ASSERT(FLAG_randomize_hashes || seed == 0);
-    return seed;
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
   }
 
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
+  // The roots that have an index less than this are always in old space.
+  static const int kOldSpaceRoots = 0x20;
+
  private:
   Heap();
 
@@ -1314,12 +1494,12 @@
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
+  intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
   int initial_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t max_executable_size_;
-  intptr_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1334,6 +1514,8 @@
   // For keeping track of context disposals.
   int contexts_disposed_;
 
+  int scan_on_scavenge_pages_;
+
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
@@ -1350,13 +1532,9 @@
   HeapState gc_state_;
   int gc_post_processing_depth_;
 
-  // Returns the size of object residing in non new spaces.
-  intptr_t PromotedSpaceSize();
-
   // Returns the amount of external memory registered since last global gc.
   int PromotedExternalMemorySize();
 
-  int mc_count_;  // how many mark-compact collections happened
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
 
@@ -1364,7 +1542,10 @@
   int unflattened_strings_length_;
 
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  inline void set_##name(type* value) {                                 \
+  inline void set_##name(type* value) {                                        \
+    /* The deserializer makes use of the fact that these common roots are */   \
+    /* never in new space and never on a page that is being compacted.    */   \
+    ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value));  \
     roots_[k##camel_name##RootIndex] = value;                                  \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -1385,6 +1566,10 @@
   HeapDebugUtils* debug_utils_;
 #endif  // DEBUG
 
+  // Indicates that the new space should be kept small due to high promotion
+  // rates caused by the mutator allocating a lot of long-lived objects.
+  bool new_space_high_promotion_mode_active_;
+
   // Limit that triggers a global GC on the next (normally caused) GC.  This
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke.
@@ -1395,6 +1580,13 @@
   // every allocation in large object space.
   intptr_t old_gen_allocation_limit_;
 
+  // Sometimes the heuristics dictate that those limits are increased.  This
+  // variable records that fact.
+  int old_gen_limit_factor_;
+
+  // Used to adjust the limits that control the timing of the next GC.
+  intptr_t size_of_old_gen_at_last_old_space_gc_;
+
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
   intptr_t external_allocation_limit_;
@@ -1414,6 +1606,8 @@
 
   Object* global_contexts_list_;
 
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -1471,13 +1665,11 @@
   // Support for computing object sizes during GC.
   HeapObjectCallback gc_safe_size_of_old_object_;
   static int GcSafeSizeOfOldObject(HeapObject* object);
-  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
 
   // Update the GC state. Called from the mark-compact collector.
   void MarkMapPointersAsEncoded(bool encoded) {
-    gc_safe_size_of_old_object_ = encoded
-        ? &GcSafeSizeOfOldObjectWithEncodedMap
-        : &GcSafeSizeOfOldObject;
+    ASSERT(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
   }
 
   // Checks whether a global GC is necessary
@@ -1489,11 +1681,10 @@
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
 
-  static const intptr_t kMinimumPromotionLimit = 2 * MB;
-  static const intptr_t kMinimumAllocationLimit = 8 * MB;
 
   inline void UpdateOldSpaceLimits();
 
+
   // Allocate an uninitialized object in map space.  The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
   // have to test the allocation space argument and (b) can reduce code size
@@ -1528,8 +1719,6 @@
   // Allocate empty fixed double array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
 
-  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
   // Performs a minor collection in new generation.
   void Scavenge();
 
@@ -1538,16 +1727,15 @@
       Object** pointer);
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap,
+                                          MemoryChunk* page,
+                                          StoreBufferEvent event);
 
   // Performs a major collection in the whole heap.
   void MarkCompact(GCTracer* tracer);
 
   // Code to be run before and after mark-compact.
-  void MarkCompactPrologue(bool is_compacting);
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
+  void MarkCompactPrologue();
 
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
@@ -1557,12 +1745,11 @@
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
 
   // Initializes a function with a shared part and prototype.
-  // Returns the function.
   // Note: this code was factored out of AllocateFunction such that
   // other parts of the VM could use it. Specifically, a function that creates
   // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* InitializeFunction(
+  inline void InitializeFunction(
       JSFunction* function,
       SharedFunctionInfo* shared,
       Object* prototype);
@@ -1623,10 +1810,16 @@
     return survival_rate_trend() == INCREASING;
   }
 
+  bool IsDecreasingSurvivalTrend() {
+    return survival_rate_trend() == DECREASING;
+  }
+
   bool IsHighSurvivalRate() {
     return high_survival_rate_period_length_ > 0;
   }
 
+  void SelectScavengingVisitorsTable();
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
 
@@ -1646,10 +1839,11 @@
 
   MarkCompactCollector mark_compact_collector_;
 
-  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
-  // Instead of clearing this flag from all pages we just flip
-  // its meaning at the beginning of a scavenge.
-  intptr_t page_watermark_invalidated_mark_;
+  StoreBuffer store_buffer_;
+
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
 
   int number_idle_notifications_;
   unsigned int last_idle_notification_gc_count_;
@@ -1664,7 +1858,9 @@
 
   ExternalStringTable external_string_table_;
 
-  bool is_safe_to_read_maps_;
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+  MemoryChunk* chunks_queued_for_free_;
 
   friend class Factory;
   friend class GCTracer;
@@ -1763,29 +1959,6 @@
     }
   }
 };
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-        if (HEAP->InNewSpace(object)) {
-          ASSERT(HEAP->InToSpace(object));
-          Address addr = reinterpret_cast<Address>(current);
-          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
-        }
-      }
-    }
-  }
-};
 #endif
 
 
@@ -1860,7 +2033,6 @@
  public:
   enum HeapObjectsFiltering {
     kNoFiltering,
-    kFilterFreeListNodes,
     kFilterUnreachable
   };
 
@@ -2084,7 +2256,13 @@
       MC_MARK,
       MC_SWEEP,
       MC_SWEEP_NEWSPACE,
-      MC_COMPACT,
+      MC_EVACUATE_PAGES,
+      MC_UPDATE_NEW_TO_NEW_POINTERS,
+      MC_UPDATE_ROOT_TO_NEW_POINTERS,
+      MC_UPDATE_OLD_TO_NEW_POINTERS,
+      MC_UPDATE_POINTERS_TO_EVACUATED,
+      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_UPDATE_MISC_POINTERS,
       MC_FLUSH_CODE,
       kNumberOfScopes
     };
@@ -2118,16 +2296,6 @@
   // Sets the full GC count.
   void set_full_gc_count(int count) { full_gc_count_ = count; }
 
-  // Sets the flag that this is a compacting full GC.
-  void set_is_compacting() { is_compacting_ = true; }
-  bool is_compacting() const { return is_compacting_; }
-
-  // Increment and decrement the count of marked objects.
-  void increment_marked_count() { ++marked_count_; }
-  void decrement_marked_count() { --marked_count_; }
-
-  int marked_count() { return marked_count_; }
-
   void increment_promoted_objects_size(int object_size) {
     promoted_objects_size_ += object_size;
   }
@@ -2152,23 +2320,6 @@
   // A count (including this one) of the number of full garbage collections.
   int full_gc_count_;
 
-  // True if the current GC is a compacting full collection, false
-  // otherwise.
-  bool is_compacting_;
-
-  // True if the *previous* full GC cwas a compacting collection (will be
-  // false if there has not been a previous full GC).
-  bool previous_has_compacted_;
-
-  // On a full GC, a count of the number of marked objects.  Incremented
-  // when an object is marked and decremented when an object's mark bit is
-  // cleared.  Will be zero on a scavenge collection.
-  int marked_count_;
-
-  // The count from the end of the previous full GC.  Will be zero if there
-  // was no previous full GC.
-  int previous_marked_count_;
-
   // Amounts of time spent in different scopes during GC.
   double scopes_[Scope::kNumberOfScopes];
 
@@ -2187,6 +2338,13 @@
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
 
+  // Incremental marking steps counters.
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+
   Heap* heap_;
 };
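
The steps_count_/steps_took_/longest_step_ fields above accumulate incremental marking work per collection cycle. A hedged sketch of how such counters reduce to a summary; the reporting helper is hypothetical and not part of GCTracer:

// Hypothetical reporting helper over incremental-marking counters
// shaped like the fields declared above.
#include <cstdio>

struct MarkingStats {
  int steps_count;      // Number of incremental marking steps.
  double steps_took;    // Total milliseconds across all steps.
  double longest_step;  // Worst-case single step, in milliseconds.
};

void Report(const MarkingStats& s) {
  double average = s.steps_count > 0 ? s.steps_took / s.steps_count : 0;
  std::printf("steps=%d total=%.1fms avg=%.2fms longest=%.1fms\n",
              s.steps_count, s.steps_took, average, s.longest_step);
}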
 
@@ -2298,6 +2456,46 @@
 };
 
 
+// Intrusive object marking uses the least significant bit of a heap
+// object's map word to mark objects.
+// Normally all map words have the least significant bit set because
+// they contain a tagged map pointer.
+// If the bit is not set, the object is marked.
+// All objects should be unmarked before resuming
+// JavaScript execution.
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    ASSERT(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    ASSERT(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
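
A self-contained demonstration of the bit trick the class above relies on: tagged map pointers always have the low bit set (kHeapObjectTag is 1), so clearing that bit is an unambiguous mark and OR-ing it back recovers the original map word.

#include <cassert>
#include <stdint.h>

const uintptr_t kNotMarkedBit = 0x1;  // Same bit as the heap-object tag.

int main() {
  uintptr_t map_word = 0x12345679;  // A tagged pointer: low bit is set.
  assert((map_word & kNotMarkedBit) != 0);  // Reads as unmarked.

  uintptr_t marked = map_word & ~kNotMarkedBit;   // SetMark.
  assert((marked & kNotMarkedBit) == 0);          // Now reads as marked.

  uintptr_t recovered = marked | kNotMarkedBit;   // MapOfMarkedObject.
  assert(recovered == map_word);  // The original map word is recoverable.
  return 0;
}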
+
+
 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2356,7 +2554,6 @@
 };
 #endif  // DEBUG || LIVE_OBJECT_LIST
 
-
 } }  // namespace v8::internal
 
 #undef HEAP
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 5630ce3..32c3abf 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -126,7 +126,9 @@
   bool may_overflow = false;  // Overflow is ignored here.
   lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
   upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+#ifdef DEBUG
   Verify();
+#endif
 }
 
 
@@ -173,7 +175,9 @@
   lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
   upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
   KeepOrder();
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -183,7 +187,9 @@
   lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
   upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
   KeepOrder();
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -197,9 +203,11 @@
 }
 
 
+#ifdef DEBUG
 void Range::Verify() const {
   ASSERT(lower_ <= upper_);
 }
+#endif
 
 
 bool Range::MulAndCheckOverflow(Range* other) {
@@ -210,7 +218,9 @@
   int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
   lower_ = Min(Min(v1, v2), Min(v3, v4));
   upper_ = Max(Max(v1, v2), Max(v3, v4));
+#ifdef DEBUG
   Verify();
+#endif
   return may_overflow;
 }
 
@@ -234,25 +244,6 @@
 }
 
 
-const char* HType::ToShortString() {
-  switch (type_) {
-    case kTagged: return "t";
-    case kTaggedPrimitive: return "p";
-    case kTaggedNumber: return "n";
-    case kSmi: return "m";
-    case kHeapNumber: return "h";
-    case kString: return "s";
-    case kBoolean: return "b";
-    case kNonPrimitive: return "r";
-    case kJSArray: return "a";
-    case kJSObject: return "o";
-    case kUninitialized: return "z";
-  }
-  UNREACHABLE();
-  return "Unreachable code";
-}
-
-
 HType HType::TypeFromValue(Handle<Object> value) {
   HType result = HType::Tagged();
   if (value->IsSmi()) {
@@ -564,7 +555,7 @@
   // followed by a simulate instruction, we need to insert after the
   // simulate instruction instead.
   HInstruction* next = previous->next_;
-  if (previous->HasSideEffects() && next != NULL) {
+  if (previous->HasObservableSideEffects() && next != NULL) {
     ASSERT(next->IsSimulate());
     previous = next;
     next = previous->next_;
@@ -587,11 +578,10 @@
     HBasicBlock* other_block = other_operand->block();
     if (cur_block == other_block) {
       if (!other_operand->IsPhi()) {
-        HInstruction* cur = cur_block->first();
+        HInstruction* cur = this->previous();
         while (cur != NULL) {
-          ASSERT(cur != this);  // We should reach other_operand before!
           if (cur == other_operand) break;
-          cur = cur->next();
+          cur = cur->previous();
         }
         // Must reach other operand in the same block!
         ASSERT(cur == other_operand);
@@ -605,7 +595,7 @@
 
   // Verify that instructions that may have side-effects are followed
   // by a simulate instruction.
-  if (HasSideEffects() && !IsOsrEntry()) {
+  if (HasObservableSideEffects() && !IsOsrEntry()) {
     ASSERT(next()->IsSimulate());
   }
 
@@ -707,6 +697,14 @@
 }
 
 
+void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HReturn::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
 }
@@ -775,17 +773,33 @@
 
 void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" == ");
-  stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
+  stream->Add(" == %o", *type_literal_);
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
+HValue* HConstant::Canonicalize() {
+  return HasNoUses() && !IsBlockEntry() ? NULL : this;
+}
+
+
+HValue* HTypeof::Canonicalize() {
+  return HasNoUses() && !IsBlockEntry() ? NULL : this;
+}
+
+
+void HTypeof::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
 }
 
 
 void HChange::PrintDataTo(StringStream* stream) {
   HUnaryOperation::PrintDataTo(stream);
-  stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
+  stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
 
   if (CanTruncateToInt32()) stream->Add(" truncating-int32");
   if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+  if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
 }
 
 
@@ -857,6 +871,23 @@
 }
 
 
+const char* HCheckInstanceType::GetCheckName() {
+  switch (check_) {
+    case IS_SPEC_OBJECT: return "object";
+    case IS_JS_ARRAY: return "array";
+    case IS_STRING: return "string";
+    case IS_SYMBOL: return "symbol";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+void HCheckInstanceType::PrintDataTo(StringStream* stream) {
+  stream->Add("%s ", GetCheckName());
+  HUnaryOperation::PrintDataTo(stream);
+}
+
+
 void HCallStub::PrintDataTo(StringStream* stream) {
   stream->Add("%s ",
               CodeStub::MajorName(major_key_, false));
@@ -1106,15 +1137,16 @@
 
 
 void HSimulate::PrintDataTo(StringStream* stream) {
-  stream->Add("id=%d ", ast_id());
-  if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
+  stream->Add("id=%d", ast_id());
+  if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
   if (values_.length() > 0) {
     if (pop_count_ > 0) stream->Add(" /");
     for (int i = 0; i < values_.length(); ++i) {
-      if (!HasAssignedIndexAt(i)) {
-        stream->Add(" push ");
-      } else {
+      if (i > 0) stream->Add(",");
+      if (HasAssignedIndexAt(i)) {
         stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
+      } else {
+        stream->Add(" push ");
       }
       values_[i]->PrintNameTo(stream);
     }
@@ -1195,7 +1227,10 @@
 
 
 bool HArrayLiteral::IsCopyOnWrite() const {
-  return constant_elements()->map() == HEAP->fixed_cow_array_map();
+  Handle<FixedArray> constant_elements = this->constant_elements();
+  FixedArrayBase* constant_elements_values =
+      FixedArrayBase::cast(constant_elements->get(1));
+  return constant_elements_values->map() == HEAP->fixed_cow_array_map();
 }
 
 
@@ -1208,28 +1243,17 @@
 }
 
 
-Range* HBitAnd::InferRange() {
+Range* HBitwise::InferRange() {
+  if (op() == Token::BIT_XOR) return HValue::InferRange();
   int32_t left_mask = (left()->range() != NULL)
       ? left()->range()->Mask()
       : 0xffffffff;
   int32_t right_mask = (right()->range() != NULL)
       ? right()->range()->Mask()
       : 0xffffffff;
-  int32_t result_mask = left_mask & right_mask;
-  return (result_mask >= 0)
-      ? new Range(0, result_mask)
-      : HValue::InferRange();
-}
-
-
-Range* HBitOr::InferRange() {
-  int32_t left_mask = (left()->range() != NULL)
-      ? left()->range()->Mask()
-      : 0xffffffff;
-  int32_t right_mask = (right()->range() != NULL)
-      ? right()->range()->Mask()
-      : 0xffffffff;
-  int32_t result_mask = left_mask | right_mask;
+  int32_t result_mask = (op() == Token::BIT_AND)
+      ? left_mask & right_mask
+      : left_mask | right_mask;
   return (result_mask >= 0)
       ? new Range(0, result_mask)
       : HValue::InferRange();
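
The unified InferRange above hinges on one property of Range::Mask(), assumed here to return a conservative mask of every bit that any value in the range may have set: the bits of a & b are a subset of left_mask & right_mask, and the bits of a | b a subset of left_mask | right_mask, so whenever the combined mask is non-negative the result lies in [0, result_mask]. A brute-force check of that reasoning:

// Brute-force check of the mask bound behind HBitwise::InferRange,
// assuming a range's mask ORs together every value in the range.
#include <cassert>
#include <stdint.h>

int32_t MaskOf(int32_t lower, int32_t upper) {
  int32_t mask = 0;
  for (int32_t v = lower; v <= upper; ++v) mask |= v;
  return mask;
}

int main() {
  const int32_t a_lo = 0, a_hi = 5;   // Mask 0x7.
  const int32_t b_lo = 0, b_hi = 12;  // Mask 0xF.
  int32_t and_mask = MaskOf(a_lo, a_hi) & MaskOf(b_lo, b_hi);
  int32_t or_mask = MaskOf(a_lo, a_hi) | MaskOf(b_lo, b_hi);
  for (int32_t a = a_lo; a <= a_hi; ++a) {
    for (int32_t b = b_lo; b <= b_hi; ++b) {
      assert((a & b) <= and_mask);  // AND result confined to the AND mask.
      assert((a | b) <= or_mask);   // OR result confined to the OR mask.
    }
  }
  return 0;
}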
@@ -1301,6 +1325,13 @@
 }
 
 
+void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add(Token::Name(token()));
+  stream->Add(" ");
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
   stream->Add(" ");
@@ -1311,6 +1342,14 @@
 }
 
 
+void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
+  left()->PrintNameTo(stream);
+  stream->Add(" ");
+  right()->PrintNameTo(stream);
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", SuccessorAt(0)->block_id());
 }
@@ -1352,7 +1391,7 @@
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
     Handle<Map> map = types->at(i);
-    LookupResult lookup;
+    LookupResult lookup(map->GetIsolate());
     map->LookupInDescriptors(NULL, *name, &lookup);
     if (lookup.IsProperty()) {
       switch (lookup.type()) {
@@ -1405,14 +1444,14 @@
 
 void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(" .");
+  stream->Add(".");
   stream->Add(*String::cast(*name())->ToCString());
 }
 
 
 void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(" .");
+  stream->Add(".");
   stream->Add(*String::cast(*name())->ToCString());
 }
 
@@ -1425,7 +1464,7 @@
 }
 
 
-bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+bool HLoadKeyedFastElement::RequiresHoleCheck() {
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) return true;
@@ -1442,11 +1481,6 @@
 }
 
 
-bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
-  return true;
-}
-
-
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1488,6 +1522,7 @@
       stream->Add("pixel");
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -1513,10 +1548,10 @@
 void HStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add(".");
-  ASSERT(name()->IsString());
   stream->Add(*String::cast(*name())->ToCString());
   stream->Add(" = ");
   value()->PrintNameTo(stream);
+  stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
   if (!transition().is_null()) {
     stream->Add(" (transition map %p)", *transition());
   }
@@ -1582,6 +1617,7 @@
     case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -1596,9 +1632,26 @@
 }
 
 
+void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintNameTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
-  if (check_hole_value()) stream->Add(" (deleteable/read-only)");
+  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+  if (details_.IsReadOnly()) stream->Add(" (read-only)");
+}
+
+
+bool HLoadGlobalCell::RequiresHoleCheck() {
+  if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (!use->IsChange()) return true;
+  }
+  return false;
 }
 
 
@@ -1610,6 +1663,8 @@
 void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
+  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+  if (details_.IsReadOnly()) stream->Add(" (read-only)");
 }
 
 
@@ -1696,6 +1751,12 @@
 }
 
 
+HType HChange::CalculateInferredType() {
+  if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
+  return type();
+}
+
+
 HType HBitwiseBinaryOperation::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1711,21 +1772,6 @@
 }
 
 
-HType HBitAnd::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HBitXor::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
-HType HBitOr::CalculateInferredType() {
-  return HType::TaggedNumber();
-}
-
-
 HType HBitNot::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1736,18 +1782,33 @@
 }
 
 
-HType HShl::CalculateInferredType() {
-  return HType::TaggedNumber();
+HType HStringCharFromCode::CalculateInferredType() {
+  return HType::String();
 }
 
 
-HType HShr::CalculateInferredType() {
-  return HType::TaggedNumber();
+HType HArrayLiteral::CalculateInferredType() {
+  return HType::JSArray();
 }
 
 
-HType HSar::CalculateInferredType() {
-  return HType::TaggedNumber();
+HType HObjectLiteralFast::CalculateInferredType() {
+  return HType::JSObject();
+}
+
+
+HType HObjectLiteralGeneric::CalculateInferredType() {
+  return HType::JSObject();
+}
+
+
+HType HRegExpLiteral::CalculateInferredType() {
+  return HType::JSObject();
+}
+
+
+HType HFunctionLiteral::CalculateInferredType() {
+  return HType::JSObject();
 }
 
 
@@ -1838,6 +1899,167 @@
 }
 
 
+#define H_CONSTANT_INT32(val)                                                  \
+new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED),                   \
+                    Representation::Integer32())
+#define H_CONSTANT_DOUBLE(val)                                                 \
+new(zone) HConstant(FACTORY->NewNumber(val, TENURED),                          \
+                    Representation::Double())
+
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                       \
+HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
+                                  HValue* context,                             \
+                                  HValue* left,                                \
+                                  HValue* right) {                             \
+  if (left->IsConstant() && right->IsConstant()) {                             \
+    HConstant* c_left = HConstant::cast(left);                                 \
+    HConstant* c_right = HConstant::cast(right);                               \
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
+      double double_res = c_left->DoubleValue() op c_right->DoubleValue();     \
+      if (TypeInfo::IsInt32Double(double_res)) {                               \
+        return H_CONSTANT_INT32(static_cast<int32_t>(double_res));             \
+      }                                                                        \
+      return H_CONSTANT_DOUBLE(double_res);                                    \
+    }                                                                          \
+  }                                                                            \
+  return new(zone) HInstr(context, left, right);                               \
+}
+
+
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
+
+#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
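
The macro above folds an arithmetic op on two number constants and then picks a representation: int32 when TypeInfo::IsInt32Double accepts the double result, double otherwise. A standalone stand-in for that predicate (illustrative, not V8's exact implementation):

// Illustrative stand-in for the TypeInfo::IsInt32Double decision: fold
// to an int32 constant only when the double result round-trips through
// int32 exactly (and is neither NaN nor -0).
#include <cassert>
#include <cmath>
#include <stdint.h>

bool IsInt32DoubleLike(double value) {
  if (value != value) return false;                      // NaN.
  if (value < -2147483648.0 || value > 2147483647.0) return false;
  if (value == 0 && std::signbit(value)) return false;   // -0 is not int32.
  int32_t as_int = static_cast<int32_t>(value);
  return static_cast<double>(as_int) == value;
}

int main() {
  assert(IsInt32DoubleLike(7.0));    // 3 + 4 folds to an int32 constant.
  assert(!IsInt32DoubleLike(0.75));  // 0.5 + 0.25 stays a double constant.
  assert(!IsInt32DoubleLike(-0.0));  // -0 needs a double constant.
  return 0;
}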
+
+
+HInstruction* HMod::NewHMod(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
+      int32_t dividend = c_left->Integer32Value();
+      int32_t divisor = c_right->Integer32Value();
+      if (divisor != 0) {
+        int32_t res = dividend % divisor;
+        if ((res == 0) && (dividend < 0)) {
+          return H_CONSTANT_DOUBLE(-0.0);
+        }
+        return H_CONSTANT_INT32(res);
+      }
+    }
+  }
+  return new(zone) HMod(context, left, right);
+}
+
+
+HInstruction* HDiv::NewHDiv(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  // If left and right are constant values, try to return a constant value.
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      if (c_right->DoubleValue() != 0) {
+        double double_res = c_left->DoubleValue() / c_right->DoubleValue();
+        if (TypeInfo::IsInt32Double(double_res)) {
+          return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
+        }
+        return H_CONSTANT_DOUBLE(double_res);
+      }
+    }
+  }
+  return new(zone) HDiv(context, left, right);
+}
+
+
+HInstruction* HBitwise::NewHBitwise(Zone* zone,
+                                    Token::Value op,
+                                    HValue* context,
+                                    HValue* left,
+                                    HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t result;
+      int32_t v_left = c_left->NumberValueAsInteger32();
+      int32_t v_right = c_right->NumberValueAsInteger32();
+      switch (op) {
+        case Token::BIT_XOR:
+          result = v_left ^ v_right;
+          break;
+        case Token::BIT_AND:
+          result = v_left & v_right;
+          break;
+        case Token::BIT_OR:
+          result = v_left | v_right;
+          break;
+        default:
+          result = 0;  // Please the compiler.
+          UNREACHABLE();
+      }
+      return H_CONSTANT_INT32(result);
+    }
+  }
+  return new(zone) HBitwise(op, context, left, right);
+}
+
+
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                             \
+HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
+                                  HValue* context,                             \
+                                  HValue* left,                                \
+                                  HValue* right) {                             \
+  if (left->IsConstant() && right->IsConstant()) {                             \
+    HConstant* c_left = HConstant::cast(left);                                 \
+    HConstant* c_right = HConstant::cast(right);                               \
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
+      return H_CONSTANT_INT32(result);                                         \
+    }                                                                          \
+  }                                                                            \
+  return new(zone) HInstr(context, left, right);                               \
+}
+
+
+DEFINE_NEW_H_BITWISE_INSTR(HSar,
+c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
+DEFINE_NEW_H_BITWISE_INSTR(HShl,
+c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
+
+#undef DEFINE_NEW_H_BITWISE_INSTR
+
+
+HInstruction* HShr::NewHShr(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t left_val = c_left->NumberValueAsInteger32();
+      int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
+      if ((right_val == 0) && (left_val < 0)) {
+        return H_CONSTANT_DOUBLE(
+            static_cast<double>(static_cast<uint32_t>(left_val)));
+      }
+      return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
+    }
+  }
+  return new(zone) HShr(context, left, right);
+}
+
+
+#undef H_CONSTANT_INT32
+#undef H_CONSTANT_DOUBLE
+
+
 void HIn::PrintDataTo(StringStream* stream) {
   key()->PrintNameTo(stream);
   stream->Add(" ");
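
Two edge cases in the folding helpers above are worth spelling out: NewHMod emits the double constant -0.0 when a negative dividend leaves a zero remainder (an int32 cannot carry the sign), and NewHShr emits a double when an unshifted negative value becomes an unsigned quantity above the int32 range. A standalone check of both facts:

// Standalone illustration of the two folding edge cases handled by
// NewHMod and NewHShr above.
#include <cassert>
#include <cmath>
#include <stdint.h>

int main() {
  // JS (-4 % 2) is -0: the int32 remainder is 0 but the sign of the
  // dividend must survive, so the folder emits a double constant.
  int32_t dividend = -4, divisor = 2;
  int32_t res = dividend % divisor;
  assert(res == 0 && dividend < 0);  // The -0.0 case in NewHMod.
  assert(std::signbit(-0.0));        // -0.0 is representable as a double.

  // JS (-1 >>> 0) is 4294967295, which does not fit in an int32,
  // so the folder emits a double constant as well.
  int32_t left = -1;
  uint32_t shr = static_cast<uint32_t>(left) >> 0;
  assert(shr == 4294967295u);
  return 0;
}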
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 0af5489..52fed88 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -67,10 +67,8 @@
   V(ArgumentsLength)                           \
   V(ArgumentsObject)                           \
   V(ArrayLiteral)                              \
-  V(BitAnd)                                    \
+  V(Bitwise)                                   \
   V(BitNot)                                    \
-  V(BitOr)                                     \
-  V(BitXor)                                    \
   V(BlockEntry)                                \
   V(BoundsCheck)                               \
   V(Branch)                                    \
@@ -118,10 +116,12 @@
   V(InstanceOfKnownGlobal)                     \
   V(InvokeFunction)                            \
   V(IsConstructCallAndBranch)                  \
-  V(IsNullAndBranch)                           \
+  V(IsNilAndBranch)                            \
   V(IsObjectAndBranch)                         \
+  V(IsStringAndBranch)                         \
   V(IsSmiAndBranch)                            \
   V(IsUndetectableAndBranch)                   \
+  V(StringCompareAndBranch)                    \
   V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
@@ -139,7 +139,8 @@
   V(LoadNamedGeneric)                          \
   V(Mod)                                       \
   V(Mul)                                       \
-  V(ObjectLiteral)                             \
+  V(ObjectLiteralFast)                         \
+  V(ObjectLiteralGeneric)                      \
   V(OsrEntry)                                  \
   V(OuterContext)                              \
   V(Parameter)                                 \
@@ -171,6 +172,7 @@
   V(Throw)                                     \
   V(ToFastProperties)                          \
   V(ToInt32)                                   \
+  V(TransitionElementsKind)                    \
   V(Typeof)                                    \
   V(TypeofIsAndBranch)                         \
   V(UnaryMathOperation)                        \
@@ -182,6 +184,7 @@
   V(Calls)                                     \
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
+  V(ElementsKind)                              \
   V(ArrayElements)                             \
   V(DoubleArrayElements)                       \
   V(SpecializedArrayElements)                  \
@@ -245,7 +248,9 @@
     return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
   }
   void KeepOrder();
+#ifdef DEBUG
   void Verify() const;
+#endif
 
   void StackUpon(Range* other) {
     Intersect(other);
@@ -397,10 +402,14 @@
     return type_ == kUninitialized;
   }
 
+  bool IsHeapObject() {
+    ASSERT(type_ != kUninitialized);
+    return IsHeapNumber() || IsString() || IsNonPrimitive();
+  }
+
   static HType TypeFromValue(Handle<Object> value);
 
   const char* ToString();
-  const char* ToShortString();
 
  private:
   enum Type {
@@ -615,8 +624,14 @@
   void SetAllSideEffects() { flags_ |= AllSideEffects(); }
   void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
   bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
+  bool HasObservableSideEffects() const {
+    return (flags_ & ObservableSideEffects()) != 0;
+  }
 
   int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
+  int ObservableChangesFlags() const {
+    return flags_ & ChangesFlagsMask() & ObservableSideEffects();
+  }
 
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
@@ -625,7 +640,7 @@
   void ComputeInitialRange();
 
   // Representation helpers.
-  virtual Representation RequiredInputRepresentation(int index) const = 0;
+  virtual Representation RequiredInputRepresentation(int index) = 0;
 
   virtual Representation InferredRepresentation() {
     return representation();
@@ -696,6 +711,12 @@
     return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
   }
 
+  // A flag mask of all side effects that can make observable changes in
+  // an executing program (i.e. are not safe to repeat, move, or remove).
+  static int ObservableSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesElementsKind);
+  }
+
   // Remove the matching use from the use list if present.  Returns the
   // removed list node or NULL.
   HUseListNode* RemoveUse(HValue* value, int index);
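
A minimal restatement of the observable-side-effects split introduced above (the other flag names are illustrative stand-ins; only kChangesElementsKind is taken from this header): an instruction whose only effect is an elements-kind change has side effects for GVN purposes but no observable ones, so it escapes the simulate requirement checked in Verify().

// Illustrative flag masks mirroring HasSideEffects /
// HasObservableSideEffects above; the enum values are stand-ins.
#include <cassert>

enum ChangesFlag { kChangesMaps, kChangesElementsKind, kChangesCalls };

const int kAllChanges =
    (1 << kChangesMaps) | (1 << kChangesElementsKind) | (1 << kChangesCalls);
const int kObservableChanges = kAllChanges & ~(1 << kChangesElementsKind);

int main() {
  int flags = 1 << kChangesElementsKind;  // e.g. an elements-kind transition.
  assert((flags & kAllChanges) != 0);         // HasSideEffects() is true.
  assert((flags & kObservableChanges) == 0);  // HasObservableSideEffects() is false.
  return 0;
}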
@@ -841,7 +862,7 @@
 
 class HBlockEntry: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -854,7 +875,7 @@
 // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
 class HSoftDeoptimize: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -866,7 +887,7 @@
  public:
   explicit HDeoptimize(int environment_length) : values_(environment_length) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -908,10 +929,10 @@
 class HGoto: public HTemplateControlInstruction<1, 0> {
  public:
   explicit HGoto(HBasicBlock* target) {
-        SetSuccessorAt(0, target);
-      }
+    SetSuccessorAt(0, target);
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -951,7 +972,7 @@
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -983,7 +1004,7 @@
 
   Handle<Map> map() const { return map_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1000,7 +1021,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1014,7 +1035,7 @@
 
 class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1049,7 +1070,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1064,7 +1085,7 @@
  public:
   explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1083,7 +1104,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return representation();  // Same as the output representation.
   }
 
@@ -1094,27 +1115,29 @@
 class HChange: public HUnaryOperation {
  public:
   HChange(HValue* value,
-          Representation from,
           Representation to,
           bool is_truncating,
           bool deoptimize_on_undefined)
-      : HUnaryOperation(value),
-        from_(from),
-        deoptimize_on_undefined_(deoptimize_on_undefined) {
-    ASSERT(!from.IsNone() && !to.IsNone());
-    ASSERT(!from.Equals(to));
+      : HUnaryOperation(value) {
+    ASSERT(!value->representation().IsNone() && !to.IsNone());
+    ASSERT(!value->representation().Equals(to));
     set_representation(to);
+    set_type(HType::TaggedNumber());
     SetFlag(kUseGVN);
+    if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
     if (is_truncating) SetFlag(kTruncatingToInt32);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+  virtual HType CalculateInferredType();
 
-  Representation from() const { return from_; }
-  Representation to() const { return representation(); }
-  bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return from_;
+  Representation from() { return value()->representation(); }
+  Representation to() { return representation(); }
+  bool deoptimize_on_undefined() const {
+    return CheckFlag(kDeoptimizeOnUndefined);
+  }
+  virtual Representation RequiredInputRepresentation(int index) {
+    return from();
   }
 
   virtual Range* InferRange();
@@ -1124,16 +1147,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    if (!other->IsChange()) return false;
-    HChange* change = HChange::cast(other);
-    return to().Equals(change->to())
-        && deoptimize_on_undefined() == change->deoptimize_on_undefined();
-  }
-
- private:
-  Representation from_;
-  bool deoptimize_on_undefined_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
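
In the HChange hunk above, the source representation is now read off the
input value and the deoptimize-on-undefined bit lives in a flag, so every
datum that used to distinguish two HChange instructions is already covered
by the operand and flag comparison GVN performs before calling DataEquals,
which can therefore return true unconditionally. A toy model of that
invariant (simplified equality order, not the real HValue::Equals):

    #include <cassert>

    // Toy instruction whose identity is fully determined by its operand
    // and its flag bits.
    struct Change {
      int operand_id;  // id of the converted value
      int flags;       // target representation, truncation, deopt bits
      // Nothing is left to compare once operands and flags match.
      bool DataEquals(const Change&) const { return true; }
      bool Equals(const Change& other) const {
        // GVN-style equality: operands and flags first, DataEquals last.
        return operand_id == other.operand_id && flags == other.flags &&
               DataEquals(other);
      }
    };

    int main() {
      Change a{7, 0x5}, b{7, 0x5}, c{7, 0x4};
      assert(a.Equals(b));   // identical conversion: one can replace the other
      assert(!a.Equals(c));  // different flags keep them distinct
      return 0;
    }
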
@@ -1145,7 +1159,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1164,7 +1178,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1223,7 +1237,7 @@
   virtual int OperandCount() { return values_.length(); }
   virtual HValue* OperandAt(int index) { return values_[index]; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1268,7 +1282,7 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1306,7 +1320,7 @@
   FunctionLiteral* function() const { return function_; }
   CallKind call_kind() const { return call_kind_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1323,7 +1337,7 @@
  public:
   HLeaveInlined() {}
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1337,7 +1351,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1349,19 +1363,27 @@
 
 class HThisFunction: public HTemplateInstruction<0> {
  public:
-  HThisFunction() {
+  explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
+  Handle<JSFunction> closure() const { return closure_; }
+
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    HThisFunction* b = HThisFunction::cast(other);
+    return *closure() == *b->closure();
+  }
+
+ private:
+  Handle<JSFunction> closure_;
 };
 
 
@@ -1372,7 +1394,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1392,7 +1414,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(OuterContext);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1410,7 +1432,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1429,7 +1451,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1465,7 +1487,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1485,7 +1507,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1500,7 +1522,7 @@
       : HBinaryCall(context, function, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1525,7 +1547,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1542,7 +1564,7 @@
       : HBinaryCall(context, key, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1566,7 +1588,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1575,15 +1597,16 @@
 };
 
 
-class HCallFunction: public HUnaryCall {
+class HCallFunction: public HBinaryCall {
  public:
-  HCallFunction(HValue* context, int argument_count)
-      : HUnaryCall(context, argument_count) {
+  HCallFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
   }
 
-  HValue* context() { return value(); }
+  HValue* context() { return first(); }
+  HValue* function() { return second(); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1602,7 +1625,7 @@
   HValue* context() { return value(); }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1622,7 +1645,7 @@
 
   Handle<JSFunction> target() const { return target_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1639,7 +1662,7 @@
       : HBinaryCall(context, constructor, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1666,7 +1689,7 @@
   const Runtime::Function* function() const { return c_function_; }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1692,7 +1715,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1716,7 +1739,7 @@
     SetFlag(kDependsOnArrayLengths);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1732,10 +1755,10 @@
   explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
-    SetFlag(kDependsOnMaps);
+    SetFlag(kDependsOnElementsKind);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1754,7 +1777,7 @@
     SetFlag(kTruncatingToInt32);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
   virtual HType CalculateInferredType();
@@ -1804,7 +1827,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -1859,9 +1882,10 @@
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnMaps);
+    SetFlag(kDependsOnElementsKind);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1884,7 +1908,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1908,7 +1932,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -1938,7 +1962,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -1978,7 +2002,9 @@
     return new HCheckInstanceType(value, IS_SYMBOL);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2008,6 +2034,8 @@
     LAST_INTERVAL_CHECK = IS_JS_ARRAY
   };
 
+  const char* GetCheckName();
+
   HCheckInstanceType(HValue* value, Check check)
       : HUnaryOperation(value), check_(check) {
     set_representation(Representation::Tagged());
@@ -2025,7 +2053,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2071,7 +2099,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2102,7 +2130,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual HType CalculateInferredType();
@@ -2151,7 +2179,7 @@
   }
 
   virtual Range* InferRange();
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return representation();
   }
   virtual HType CalculateInferredType();
@@ -2243,7 +2271,7 @@
     SetFlag(kIsArguments);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2259,7 +2287,20 @@
 
   bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  bool ImmortalImmovable() const {
+    Heap* heap = HEAP;
+    if (*handle_ == heap->undefined_value()) return true;
+    if (*handle_ == heap->null_value()) return true;
+    if (*handle_ == heap->true_value()) return true;
+    if (*handle_ == heap->false_value()) return true;
+    if (*handle_ == heap->the_hole_value()) return true;
+    if (*handle_ == heap->minus_zero_value()) return true;
+    if (*handle_ == heap->nan_value()) return true;
+    if (*handle_ == heap->empty_string()) return true;
+    return false;
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2272,6 +2313,7 @@
   }
 
   virtual bool EmitAtUses() { return !representation().IsDouble(); }
+  virtual HValue* Canonicalize();
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
   bool IsInteger() const { return handle_->IsSmi(); }
@@ -2287,6 +2329,12 @@
     ASSERT(HasDoubleValue());
     return double_value_;
   }
+  bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
+  int32_t NumberValueAsInteger32() const {
+    ASSERT(HasNumberValue());
+    if (has_int32_value_) return int32_value_;
+    return DoubleToInt32(double_value_);
+  }
   bool HasStringValue() const { return handle_->IsString(); }
 
   bool ToBoolean() const;
@@ -2367,7 +2415,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The length is untagged, all other inputs are tagged.
     return (index == 2)
         ? Representation::Integer32()
@@ -2394,7 +2442,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2410,7 +2458,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2433,7 +2481,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The arguments elements array is considered tagged.
     return index == 0
         ? Representation::Tagged()
@@ -2459,7 +2507,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
 
@@ -2484,7 +2532,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2522,7 +2570,7 @@
   }
 
   virtual HType CalculateInferredType();
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2549,7 +2597,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2587,7 +2635,7 @@
     return input_representation_;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return input_representation_;
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -2610,7 +2658,9 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2629,7 +2679,7 @@
   HValue* left() { return value(); }
   int right() const { return right_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
 
@@ -2641,21 +2691,25 @@
 };
 
 
-class HIsNullAndBranch: public HUnaryControlInstruction {
+class HIsNilAndBranch: public HUnaryControlInstruction {
  public:
-  HIsNullAndBranch(HValue* value, bool is_strict)
-      : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
+  HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
+      : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
 
-  bool is_strict() const { return is_strict_; }
+  EqualityKind kind() const { return kind_; }
+  NilValue nil() const { return nil_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
 
  private:
-  bool is_strict_;
+  EqualityKind kind_;
+  NilValue nil_;
 };
 
 
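
Renaming HIsNullAndBranch to HIsNilAndBranch above swaps the single
is_strict bit for an (EqualityKind, NilValue) pair, covering all four
comparisons against null and undefined. A standalone sketch of the
semantics the pair encodes, over a toy value domain rather than V8's
object model:

    #include <cassert>

    enum EqualityKind { kStrictEquality, kNonStrictEquality };
    enum NilValue { kNullValue, kUndefinedValue };
    enum ToyValue { kNull, kUndefined, kOther };

    bool IsNil(ToyValue v, EqualityKind kind, NilValue nil) {
      ToyValue target = (nil == kNullValue) ? kNull : kUndefined;
      if (kind == kStrictEquality) return v == target;
      // Non-strict: null and undefined compare equal to each other.
      return v == kNull || v == kUndefined;
    }

    int main() {
      assert(IsNil(kUndefined, kNonStrictEquality, kNullValue));  // undefined == null
      assert(!IsNil(kUndefined, kStrictEquality, kNullValue));    // undefined === null
      return 0;
    }
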
@@ -2664,13 +2718,25 @@
   explicit HIsObjectAndBranch(HValue* value)
     : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 };
 
+
+class HIsStringAndBranch: public HUnaryControlInstruction {
+ public:
+  explicit HIsStringAndBranch(HValue* value)
+    : HUnaryControlInstruction(value, NULL, NULL) { }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+};
+
 
 class HIsSmiAndBranch: public HUnaryControlInstruction {
  public:
@@ -2679,7 +2745,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2693,7 +2759,7 @@
   explicit HIsUndetectableAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2701,9 +2767,45 @@
 };
 
 
+class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+ public:
+  HStringCompareAndBranch(HValue* context,
+                          HValue* left,
+                          HValue* right,
+                          Token::Value token)
+      : token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
+    set_representation(Representation::Tagged());
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* left() { return OperandAt(1); }
+  HValue* right() { return OperandAt(2); }
+  Token::Value token() const { return token_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  Representation GetInputRepresentation() const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
+
+ private:
+  Token::Value token_;
+};
+
+
 class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2725,7 +2827,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2742,7 +2844,7 @@
   explicit HHasCachedArrayIndexAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2757,7 +2859,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2776,7 +2878,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2800,7 +2902,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2817,7 +2919,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2845,7 +2947,7 @@
   HValue* left() { return OperandAt(1); }
   Handle<JSFunction> function() { return function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2870,7 +2972,7 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
       ? Representation::Double()
       : Representation::None();
@@ -2898,6 +3000,11 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHAdd(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Add)
@@ -2918,6 +3025,11 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHSub(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
@@ -2941,6 +3053,11 @@
     return !representation().IsTagged();
   }
 
+  static HInstruction* NewHMul(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
@@ -2969,6 +3086,11 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHMod(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
@@ -2988,6 +3110,12 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHDiv(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
@@ -2997,52 +3125,36 @@
 };
 
 
-class HBitAnd: public HBitwiseBinaryOperation {
+class HBitwise: public HBitwiseBinaryOperation {
  public:
-  HBitAnd(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
+  HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right), op_(op) {
+    ASSERT(op == Token::BIT_AND ||
+           op == Token::BIT_OR ||
+           op == Token::BIT_XOR);
+  }
+
+  Token::Value op() const { return op_; }
 
   virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitAnd)
+  static HInstruction* NewHBitwise(Zone* zone,
+                                   Token::Value op,
+                                   HValue* context,
+                                   HValue* left,
+                                   HValue* right);
+
+  DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    return op() == HBitwise::cast(other)->op();
+  }
 
   virtual Range* InferRange();
-};
 
-
-class HBitXor: public HBitwiseBinaryOperation {
- public:
-  HBitXor(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
-
-  virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(BitXor)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HBitOr: public HBitwiseBinaryOperation {
- public:
-  HBitOr(HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right) { }
-
-  virtual bool IsCommutative() const { return true; }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(BitOr)
-
- protected:
-  virtual bool DataEquals(HValue* other) { return true; }
-
-  virtual Range* InferRange();
+ private:
+  Token::Value op_;
 };
 
 
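
The static NewHAdd/NewHSub/.../NewHBitwise factories declared across these
hunks give each binary operation a construction point at which both inputs
can be inspected before an instruction is materialized; plausibly this is
where HConstant's new HasNumberValue()/NumberValueAsInteger32() accessors
pay off, folding the operation when both operands are known numbers. A
hedged sketch of that pattern with toy types (the real factories return
HInstruction* and are defined outside this header):

    #include <cassert>
    #include <cstdint>

    struct Value {
      bool has_number;  // models HConstant::HasNumberValue()
      int32_t number;   // models NumberValueAsInteger32()
    };

    // Toy factory in the spirit of a NewHBitwise for Token::BIT_OR: fold
    // when both operands are known numbers, otherwise signal that a
    // generic instruction must be emitted.
    bool NewBitOr(const Value& left, const Value& right, int32_t* folded) {
      if (left.has_number && right.has_number) {
        *folded = left.number | right.number;
        return true;  // caller uses the constant, no instruction needed
      }
      return false;   // caller emits the generic bitwise instruction
    }

    int main() {
      int32_t result = 0;
      assert(NewBitOr({true, 0xF0}, {true, 0x0F}, &result));
      assert(result == 0xFF);
      assert(!NewBitOr({true, 0xF0}, {false, 0}, &result));  // not foldable
      return 0;
    }
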
@@ -3052,7 +3164,11 @@
       : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
-  virtual HType CalculateInferredType();
+
+  static HInstruction* NewHShl(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
 
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
@@ -3067,7 +3183,11 @@
       : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
-  virtual HType CalculateInferredType();
+
+  static HInstruction* NewHShr(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
 
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
@@ -3082,7 +3202,11 @@
       : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual Range* InferRange();
-  virtual HType CalculateInferredType();
+
+  static HInstruction* NewHSar(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
 
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
@@ -3099,7 +3223,7 @@
 
   int ast_id() const { return ast_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3120,7 +3244,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3152,7 +3276,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3168,7 +3292,7 @@
  public:
   HUnknownOSRValue() { set_representation(Representation::Tagged()); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3178,15 +3302,15 @@
 
 class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
-      : cell_(cell), check_hole_value_(check_hole_value) {
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
+      : cell_(cell), details_(details) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
-  bool check_hole_value() const { return check_hole_value_; }
+  bool RequiresHoleCheck();
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -3195,7 +3319,7 @@
     return reinterpret_cast<intptr_t>(*cell_);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3209,7 +3333,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  bool check_hole_value_;
+  PropertyDetails details_;
 };
 
 
@@ -3234,7 +3358,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3246,21 +3370,33 @@
 };
 
 
+inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+  return !value->type().IsBoolean()
+      && !value->type().IsSmi()
+      && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
+}
+
+
 class HStoreGlobalCell: public HUnaryOperation {
  public:
   HStoreGlobalCell(HValue* value,
                    Handle<JSGlobalPropertyCell> cell,
-                   bool check_hole_value)
+                   PropertyDetails details)
       : HUnaryOperation(value),
         cell_(cell),
-        check_hole_value_(check_hole_value) {
+        details_(details) {
     SetFlag(kChangesGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
-  bool check_hole_value() const { return check_hole_value_; }
+  bool RequiresHoleCheck() {
+    return !details_.IsDontDelete() || details_.IsReadOnly();
+  }
+  bool NeedsWriteBarrier() {
+    return StoringValueNeedsWriteBarrier(value());
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3269,7 +3405,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  bool check_hole_value_;
+  PropertyDetails details_;
 };
 
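
The hunk above, together with the removal that follows, replaces the old
InOldSpace() test in StoringValueNeedsWriteBarrier with ImmortalImmovable():
once the collector marks incrementally and records cross-region stores in a
store buffer, residing in old space no longer exempts a store from the
barrier; only storing values the GC will never move or reclaim (the
canonical roots enumerated by ImmortalImmovable) does. A minimal model of
the predicate, with assumed field names:

    #include <cassert>

    // Toy value descriptor; the real code consults HType and HConstant.
    struct Value {
      bool is_immediate;  // smis and the like, never heap pointers
      bool is_immortal;   // canonical roots: undefined, null, true, ...
    };

    bool StoringValueNeedsWriteBarrier(const Value& v) {
      return !v.is_immediate && !v.is_immortal;
    }

    int main() {
      assert(!StoringValueNeedsWriteBarrier({true, false}));  // immediate
      assert(!StoringValueNeedsWriteBarrier({false, true}));  // immortal root
      assert(StoringValueNeedsWriteBarrier({false, false}));  // heap reference
      return 0;
    }
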
 
@@ -3279,9 +3415,9 @@
                       HValue* global_object,
                       Handle<Object> name,
                       HValue* value,
-                      bool strict_mode)
+                      StrictModeFlag strict_mode_flag)
       : name_(name),
-        strict_mode_(strict_mode) {
+        strict_mode_flag_(strict_mode_flag) {
     SetOperandAt(0, context);
     SetOperandAt(1, global_object);
     SetOperandAt(2, value);
@@ -3293,11 +3429,11 @@
   HValue* global_object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
   HValue* value() { return OperandAt(2); }
-  bool strict_mode() { return strict_mode_; }
+  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3305,7 +3441,7 @@
 
  private:
   Handle<Object> name_;
-  bool strict_mode_;
+  StrictModeFlag strict_mode_flag_;
 };
 
 
@@ -3320,7 +3456,7 @@
 
   int slot_index() const { return slot_index_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3339,13 +3475,6 @@
 };
 
 
-static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
-  return !value->type().IsBoolean()
-      && !value->type().IsSmi()
-      && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
-}
-
-
 class HStoreContextSlot: public HTemplateInstruction<2> {
  public:
   HStoreContextSlot(HValue* context, int slot_index, HValue* value)
@@ -3363,7 +3492,7 @@
     return StoringValueNeedsWriteBarrier(value());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3396,7 +3525,7 @@
   bool is_in_object() const { return is_in_object_; }
   int offset() const { return offset_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3428,7 +3557,7 @@
   Handle<String> name() { return name_; }
   bool need_generic() { return need_generic_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3463,7 +3592,7 @@
   HValue* object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3487,7 +3616,7 @@
 
   HValue* function() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3511,7 +3640,7 @@
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3520,7 +3649,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck() const;
+  bool RequiresHoleCheck();
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
 
@@ -3542,7 +3671,7 @@
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3551,8 +3680,6 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck() const;
-
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
 
  protected:
@@ -3582,7 +3709,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32, but the base pointer
     // for the element load is a naked pointer.
     return index == 0
@@ -3625,7 +3752,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3654,7 +3781,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3686,9 +3813,9 @@
                      HValue* object,
                      Handle<String> name,
                      HValue* value,
-                     bool strict_mode)
+                     StrictModeFlag strict_mode_flag)
       : name_(name),
-        strict_mode_(strict_mode) {
+        strict_mode_flag_(strict_mode_flag) {
     SetOperandAt(0, object);
     SetOperandAt(1, value);
     SetOperandAt(2, context);
@@ -3699,11 +3826,11 @@
   HValue* value() { return OperandAt(1); }
   HValue* context() { return OperandAt(2); }
   Handle<String> name() { return name_; }
-  bool strict_mode() { return strict_mode_; }
+  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3711,20 +3838,22 @@
 
  private:
   Handle<String> name_;
-  bool strict_mode_;
+  StrictModeFlag strict_mode_flag_;
 };
 
 
 class HStoreKeyedFastElement: public HTemplateInstruction<3> {
  public:
-  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
+  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
+                         ElementsKind elements_kind = FAST_ELEMENTS)
+      : elements_kind_(elements_kind) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
     SetFlag(kChangesArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 1
         ? Representation::Integer32()
@@ -3734,14 +3863,28 @@
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
+  bool value_is_smi() {
+    return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
+  }
 
   bool NeedsWriteBarrier() {
-    return StoringValueNeedsWriteBarrier(value());
+    if (value_is_smi()) {
+      return false;
+    } else {
+      return StoringValueNeedsWriteBarrier(value());
+    }
+  }
+
+  bool ValueNeedsSmiCheck() {
+    return value_is_smi();
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+
+ private:
+  ElementsKind elements_kind_;
 };
 
 
@@ -3756,7 +3899,7 @@
     SetFlag(kChangesDoubleArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 1) {
       return Representation::Integer32();
     } else if (index == 2) {
@@ -3795,7 +3938,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
       return Representation::External();
     } else {
@@ -3828,8 +3971,8 @@
                      HValue* object,
                      HValue* key,
                      HValue* value,
-                     bool strict_mode)
-      : strict_mode_(strict_mode) {
+                     StrictModeFlag strict_mode_flag)
+      : strict_mode_flag_(strict_mode_flag) {
     SetOperandAt(0, object);
     SetOperandAt(1, key);
     SetOperandAt(2, value);
@@ -3841,9 +3984,9 @@
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   HValue* context() { return OperandAt(3); }
-  bool strict_mode() { return strict_mode_; }
+  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3852,7 +3995,45 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
  private:
-  bool strict_mode_;
+  StrictModeFlag strict_mode_flag_;
+};
+
+
+class HTransitionElementsKind: public HTemplateInstruction<1> {
+ public:
+  HTransitionElementsKind(HValue* object,
+                          Handle<Map> original_map,
+                          Handle<Map> transitioned_map)
+      : original_map_(original_map),
+        transitioned_map_(transitioned_map) {
+    SetOperandAt(0, object);
+    SetFlag(kUseGVN);
+    SetFlag(kChangesElementsKind);
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* object() { return OperandAt(0); }
+  Handle<Map> original_map() { return original_map_; }
+  Handle<Map> transitioned_map() { return transitioned_map_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
+
+ protected:
+  virtual bool DataEquals(HValue* other) {
+    HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
+    return original_map_.is_identical_to(instr->original_map()) &&
+        transitioned_map_.is_identical_to(instr->transitioned_map());
+  }
+
+ private:
+  Handle<Map> original_map_;
+  Handle<Map> transitioned_map_;
 };
 
 
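
HTransitionElementsKind raises only kChangesElementsKind, which pairs with
the kDependsOnElementsKind flags added earlier in this patch: when GVN
reaches a transition it must drop cached values that depend on the elements
kind while leaving unrelated entries such as map checks alone. A small
standalone model of that kill step, with invented entry names, in the
spirit of HValueMap::Kill:

    #include <cassert>
    #include <vector>

    enum { kDependsOnElementsKind = 1 << 0, kDependsOnMaps = 1 << 1 };

    struct Entry {
      const char* name;
      int depends_on;
    };

    // Drop every cached value whose dependencies intersect the changed
    // flags.
    void Kill(std::vector<Entry>* table, int changes) {
      for (size_t i = 0; i < table->size();) {
        if ((*table)[i].depends_on & changes) {
          table->erase(table->begin() + i);
        } else {
          ++i;
        }
      }
    }

    int main() {
      std::vector<Entry> table = {{"elements-kind", kDependsOnElementsKind},
                                  {"map-check", kDependsOnMaps}};
      Kill(&table, kDependsOnElementsKind);  // a transition happened
      assert(table.size() == 1);             // only the map check survives
      return 0;
    }
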
@@ -3865,7 +4046,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3891,7 +4072,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The index is supposed to be Integer32.
     return index == 2
         ? Representation::Integer32()
@@ -3918,15 +4099,16 @@
   HStringCharFromCode(HValue* context, HValue* char_code) {
     SetOperandAt(0, context);
     SetOperandAt(1, char_code);
-     set_representation(Representation::Tagged());
+    set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
   }
+  virtual HType CalculateInferredType();
 
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
@@ -3945,7 +4127,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4001,9 +4183,10 @@
 
   bool IsCopyOnWrite() const;
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
 
@@ -4013,14 +4196,49 @@
 };
 
 
-class HObjectLiteral: public HMaterializedLiteral<1> {
+class HObjectLiteralFast: public HMaterializedLiteral<1> {
  public:
-  HObjectLiteral(HValue* context,
-                 Handle<FixedArray> constant_properties,
-                 bool fast_elements,
-                 int literal_index,
-                 int depth,
-                 bool has_function)
+  HObjectLiteralFast(HValue* context,
+                     Handle<JSObject> boilerplate,
+                     int total_size,
+                     int literal_index,
+                     int depth)
+      : HMaterializedLiteral<1>(literal_index, depth),
+        boilerplate_(boilerplate),
+        total_size_(total_size) {
+    SetOperandAt(0, context);
+  }
+
+  // Maximum depth and total number of properties for object literal
+  // graphs to be considered for fast deep-copying.
+  static const int kMaxObjectLiteralDepth = 3;
+  static const int kMaxObjectLiteralProperties = 8;
+
+  HValue* context() { return OperandAt(0); }
+  Handle<JSObject> boilerplate() const { return boilerplate_; }
+  int total_size() const { return total_size_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast)
+
+ private:
+  Handle<JSObject> boilerplate_;
+  int total_size_;
+};
+
+
+class HObjectLiteralGeneric: public HMaterializedLiteral<1> {
+ public:
+  HObjectLiteralGeneric(HValue* context,
+                        Handle<FixedArray> constant_properties,
+                        bool fast_elements,
+                        int literal_index,
+                        int depth,
+                        bool has_function)
       : HMaterializedLiteral<1>(literal_index, depth),
         constant_properties_(constant_properties),
         fast_elements_(fast_elements),
@@ -4035,11 +4253,12 @@
   bool fast_elements() const { return fast_elements_; }
   bool has_function() const { return has_function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric)
 
  private:
   Handle<FixedArray> constant_properties_;
@@ -4065,9 +4284,10 @@
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
 
@@ -4089,9 +4309,10 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
 
@@ -4115,7 +4336,10 @@
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual HValue* Canonicalize();
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4129,11 +4353,11 @@
     // This instruction is not marked as having side effects, but
     // changes the map of the input operand. Use it only when creating
     // object literals.
-    ASSERT(value->IsObjectLiteral());
+    ASSERT(value->IsObjectLiteralGeneric() || value->IsObjectLiteralFast());
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4147,7 +4371,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4163,7 +4387,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4190,7 +4414,7 @@
   HValue* key() { return OperandAt(1); }
   HValue* object() { return OperandAt(2); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index c625fba..5cf6e3d 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -164,10 +164,11 @@
 }
 
 
-void HBasicBlock::Goto(HBasicBlock* block) {
+void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
   if (block->IsInlineReturnTarget()) {
     AddInstruction(new(zone()) HLeaveInlined);
     last_environment_ = last_environment()->outer();
+    if (drop_extra) last_environment_->Drop(1);
   }
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(block);
@@ -175,11 +176,14 @@
 }
 
 
-void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
+void HBasicBlock::AddLeaveInlined(HValue* return_value,
+                                  HBasicBlock* target,
+                                  bool drop_extra) {
   ASSERT(target->IsInlineReturnTarget());
   ASSERT(return_value != NULL);
   AddInstruction(new(zone()) HLeaveInlined);
   last_environment_ = last_environment()->outer();
+  if (drop_extra) last_environment_->Drop(1);
   last_environment()->Push(return_value);
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(target);
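
The drop_extra flag threaded through Goto and AddLeaveInlined above pops one
extra slot from the outer environment when control leaves an inlined
function whose call had left an additional value on the simulated expression
stack. A toy simulation of that bookkeeping (invented Environment type;
HEnvironment is the real counterpart):

    #include <cassert>
    #include <vector>

    struct Environment {
      std::vector<int> stack;
      void Push(int v) { stack.push_back(v); }
      void Drop(int n) { stack.resize(stack.size() - n); }
    };

    // Models AddLeaveInlined: after returning to the outer environment,
    // optionally drop one extra slot the inlined call left behind, then
    // push the return value.
    void LeaveInlined(Environment* outer, int return_value, bool drop_extra) {
      if (drop_extra) outer->Drop(1);
      outer->Push(return_value);
    }

    int main() {
      Environment outer;
      outer.Push(101);  // extra slot produced before entering the callee
      LeaveInlined(&outer, 42, true);
      assert(outer.stack.size() == 1 && outer.stack[0] == 42);
      return 0;
    }
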
@@ -422,7 +426,7 @@
 };
 
 
-void HGraph::Verify() const {
+void HGraph::Verify(bool do_full_verify) const {
   for (int i = 0; i < blocks_.length(); i++) {
     HBasicBlock* block = blocks_.at(i);
 
@@ -473,25 +477,27 @@
   // Check special property of first block to have no predecessors.
   ASSERT(blocks_.at(0)->predecessors()->is_empty());
 
-  // Check that the graph is fully connected.
-  ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
-  ASSERT(analyzer.visited_count() == blocks_.length());
+  if (do_full_verify) {
+    // Check that the graph is fully connected.
+    ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+    ASSERT(analyzer.visited_count() == blocks_.length());
 
-  // Check that entry block dominator is NULL.
-  ASSERT(entry_block_->dominator() == NULL);
+    // Check that entry block dominator is NULL.
+    ASSERT(entry_block_->dominator() == NULL);
 
-  // Check dominators.
-  for (int i = 0; i < blocks_.length(); ++i) {
-    HBasicBlock* block = blocks_.at(i);
-    if (block->dominator() == NULL) {
-      // Only start block may have no dominator assigned to.
-      ASSERT(i == 0);
-    } else {
-      // Assert that block is unreachable if dominator must not be visited.
-      ReachabilityAnalyzer dominator_analyzer(entry_block_,
-                                              blocks_.length(),
-                                              block->dominator());
-      ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+    // Check dominators.
+    for (int i = 0; i < blocks_.length(); ++i) {
+      HBasicBlock* block = blocks_.at(i);
+      if (block->dominator() == NULL) {
+        // Only the start block may have no dominator assigned.
+        ASSERT(i == 0);
+      } else {
+        // The block must be unreachable when its dominator is not visited.
+        ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                                blocks_.length(),
+                                                block->dominator());
+        ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+      }
     }
   }
 }
@@ -539,7 +545,7 @@
 HGraphBuilder::HGraphBuilder(CompilationInfo* info,
                              TypeFeedbackOracle* oracle)
     : function_state_(NULL),
-      initial_function_state_(this, info, oracle),
+      initial_function_state_(this, info, oracle, false),
       ast_context_(NULL),
       break_scope_(NULL),
       graph_(NULL),
@@ -728,6 +734,7 @@
       Postorder(it.Current(), visited, order, block);
     }
   } else {
+    ASSERT(block->IsFinished());
     for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
       Postorder(it.Current(), visited, order, loop_header);
     }
@@ -750,7 +757,7 @@
       // All others are back edges, and thus cannot dominate the loop header.
       blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
     } else {
-      for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
+      for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
         blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
       }
     }
@@ -850,7 +857,7 @@
 }
 
 
-bool HGraph::CheckPhis() {
+bool HGraph::CheckArgumentsPhiUses() {
   int block_count = blocks_.length();
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -863,13 +870,11 @@
 }
 
 
-bool HGraph::CollectPhis() {
+bool HGraph::CheckConstPhiUses() {
   int block_count = blocks_.length();
-  phi_list_ = new ZoneList<HPhi*>(block_count);
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
       HPhi* phi = blocks_[i]->phis()->at(j);
-      phi_list_->Add(phi);
       // Check for the hole value (from an uninitialized const).
       for (int k = 0; k < phi->OperandCount(); k++) {
         if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -880,6 +885,18 @@
 }
 
 
+void HGraph::CollectPhis() {
+  int block_count = blocks_.length();
+  phi_list_ = new ZoneList<HPhi*>(block_count);
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      phi_list_->Add(phi);
+    }
+  }
+}
+
+
 void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
   BitVector in_worklist(GetMaximumValueID());
   for (int i = 0; i < worklist->length(); ++i) {
@@ -1330,6 +1347,7 @@
   explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
       : graph_(graph),
         info_(info),
+        removed_side_effects_(false),
         block_side_effects_(graph->blocks()->length()),
         loop_side_effects_(graph->blocks()->length()),
         visited_on_paths_(graph->zone(), graph->blocks()->length()) {
@@ -1341,7 +1359,8 @@
     ASSERT(!info_->isolate()->heap()->allow_allocation(true));
   }
 
-  void Analyze();
+  // Returns true if values with side effects are removed.
+  bool Analyze();
 
  private:
   int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
@@ -1361,6 +1380,7 @@
 
   HGraph* graph_;
   CompilationInfo* info_;
+  bool removed_side_effects_;
 
   // A map of block IDs to their side effects.
   ZoneList<int> block_side_effects_;
@@ -1374,13 +1394,14 @@
 };
 
 
-void HGlobalValueNumberer::Analyze() {
+bool HGlobalValueNumberer::Analyze() {
   ComputeBlockSideEffects();
   if (FLAG_loop_invariant_code_motion) {
     LoopInvariantCodeMotion();
   }
   HValueMap* map = new(zone()) HValueMap();
   AnalyzeBlock(graph_->entry_block(), map);
+  return removed_side_effects_;
 }
 
 
@@ -1514,11 +1535,12 @@
     HInstruction* next = instr->next();
     int flags = instr->ChangesFlags();
     if (flags != 0) {
-      ASSERT(!instr->CheckFlag(HValue::kUseGVN));
       // Clear all instructions in the map that are affected by side effects.
       map->Kill(flags);
       TraceGVN("Instruction %d kills\n", instr->id());
-    } else if (instr->CheckFlag(HValue::kUseGVN)) {
+    }
+    if (instr->CheckFlag(HValue::kUseGVN)) {
+      ASSERT(!instr->HasObservableSideEffects());
       HValue* other = map->Lookup(instr);
       if (other != NULL) {
         ASSERT(instr->Equals(other) && other->Equals(instr));
@@ -1527,6 +1549,7 @@
                  instr->Mnemonic(),
                  other->id(),
                  other->Mnemonic());
+        if (instr->HasSideEffects()) removed_side_effects_ = true;
         instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
@@ -1656,7 +1679,7 @@
   }
 
   // Prefer unboxing over boxing, the latter is more expensive.
-  if (tagged_count > non_tagged_count) Representation::None();
+  if (tagged_count > non_tagged_count) return Representation::None();
 
   // Prefer Integer32 over Double, if possible.
   if (int32_count > 0 && value->IsConvertibleToInteger()) {
@@ -1851,7 +1874,7 @@
   }
 
   if (new_value == NULL) {
-    new_value = new(zone()) HChange(value, value->representation(), to,
+    new_value = new(zone()) HChange(value, to,
                                     is_truncating, deoptimize_on_undefined);
   }
 
@@ -1996,11 +2019,13 @@
 // a (possibly inlined) function.
 FunctionState::FunctionState(HGraphBuilder* owner,
                              CompilationInfo* info,
-                             TypeFeedbackOracle* oracle)
+                             TypeFeedbackOracle* oracle,
+                             bool drop_extra)
     : owner_(owner),
       compilation_info_(info),
       oracle_(oracle),
       call_context_(NULL),
+      drop_extra_(drop_extra),
       function_return_(NULL),
       test_context_(NULL),
       outer_(owner->function_state()) {
@@ -2090,12 +2115,12 @@
 void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
   ASSERT(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
-  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -2113,12 +2138,12 @@
   }
   owner()->AddInstruction(instr);
   owner()->Push(instr);
-  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
     return owner()->Bailout("bad value context for arguments object value");
   }
@@ -2143,7 +2168,7 @@
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
-  if (instr->HasSideEffects()) {
+  if (instr->HasObservableSideEffects()) {
     builder->Push(instr);
     builder->AddSimulate(ast_id);
     builder->Pop();
@@ -2153,14 +2178,14 @@
 
 
 void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasSideEffects());
+  ASSERT(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
   instr->SetSuccessorAt(1, empty_false);
   owner()->current_block()->Finish(instr);
-  empty_true->Goto(if_true());
-  empty_false->Goto(if_false());
+  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
+  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
   owner()->set_current_block(NULL);
 }
 
@@ -2181,8 +2206,8 @@
   HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
   builder->current_block()->Finish(test);
 
-  empty_true->Goto(if_true());
-  empty_false->Goto(if_false());
+  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
+  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
   builder->set_current_block(NULL);
 }
 
@@ -2302,7 +2327,7 @@
     // Handle implicit declaration of the function name in named function
     // expressions before other declarations.
     if (scope->is_function_scope() && scope->function() != NULL) {
-      HandleDeclaration(scope->function(), Variable::CONST, NULL);
+      HandleDeclaration(scope->function(), CONST, NULL);
     }
     VisitDeclarations(scope->declarations());
     AddSimulate(AstNode::kDeclarationsId);
@@ -2323,17 +2348,24 @@
 
   graph()->OrderBlocks();
   graph()->AssignDominators();
+
+#ifdef DEBUG
+  // Do a full verify after building the graph and computing dominators.
+  graph()->Verify(true);
+#endif
+
   graph()->PropagateDeoptimizingMark();
+  if (!graph()->CheckConstPhiUses()) {
+    Bailout("Unsupported phi use of const variable");
+    return NULL;
+  }
   graph()->EliminateRedundantPhis();
-  if (!graph()->CheckPhis()) {
-    Bailout("Unsupported phi use of arguments object");
+  if (!graph()->CheckArgumentsPhiUses()) {
+    Bailout("Unsupported phi use of arguments");
     return NULL;
   }
   if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
-  if (!graph()->CollectPhis()) {
-    Bailout("Unsupported phi use of uninitialized constant");
-    return NULL;
-  }
+  graph()->CollectPhis();
 
   HInferRepresentation rep(graph());
   rep.Analyze();
@@ -2348,7 +2380,13 @@
   if (FLAG_use_gvn) {
     HPhase phase("Global value numbering", graph());
     HGlobalValueNumberer gvn(graph(), info());
-    gvn.Analyze();
+    bool removed_side_effects = gvn.Analyze();
+    // Trigger a second analysis pass to further eliminate duplicate values that
+    // could only be discovered by removing side-effect-generating instructions
+    // during the first pass.
+    if (FLAG_smi_only_arrays && removed_side_effects) {
+      gvn.Analyze();
+    }
   }
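
The rationale for the second pass: an instruction can be a GVN candidate and
still carry side-effect flags that kill map entries. Once the first pass
deletes it as a duplicate, values it used to kill become numberable on a
rerun. A standalone toy model of that effect (simplified flag sets, not
Hydrogen's GVN):

  #include <cstdio>
  #include <map>
  #include <string>
  #include <vector>

  // Toy straight-line GVN. 'changes'/'depends' are bit sets of side-effect
  // flags; a duplicate is deleted even if it has side effects, and the pass
  // reports whether that happened so the caller can rerun it.
  struct Instr {
    const char* key;    // value number key
    unsigned changes;   // flags this instruction clobbers
    unsigned depends;   // flags this instruction's value depends on
    bool dead;
  };

  static bool Analyze(std::vector<Instr>* stream) {
    bool removed_side_effects = false;
    std::map<std::string, size_t> map;
    for (size_t i = 0; i < stream->size(); ++i) {
      Instr& instr = (*stream)[i];
      if (instr.dead) continue;
      if (instr.changes != 0) {
        // Kill every mapped value that depends on the changed flags.
        std::map<std::string, size_t>::iterator it = map.begin();
        while (it != map.end()) {
          if ((*stream)[it->second].depends & instr.changes) map.erase(it++);
          else ++it;
        }
      }
      if (map.count(instr.key)) {
        if (instr.changes != 0) removed_side_effects = true;
        instr.dead = true;  // replaced by the earlier equivalent instruction
      } else {
        map[instr.key] = i;
      }
    }
    return removed_side_effects;
  }

  int main() {
    // T clobbers flag 1 but does not depend on it; L depends on flag 1.
    Instr raw[] = { {"T", 1, 0, false}, {"L", 0, 1, false},
                    {"T", 1, 0, false}, {"L", 0, 1, false} };
    std::vector<Instr> s(raw, raw + 4);
    if (Analyze(&s)) Analyze(&s);  // second pass only if needed
    int dead = 0;
    for (size_t i = 0; i < s.size(); ++i) dead += s[i].dead;
    std::printf("%d of 4 removed\n", dead);  // one pass finds 1, two find 2
    return 0;
  }
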
 
   if (FLAG_use_range) {
@@ -2636,12 +2674,14 @@
                       test->if_false());
     } else if (context->IsEffect()) {
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(function_return());
+      current_block()->Goto(function_return(), function_state()->drop_extra());
     } else {
       ASSERT(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
       HValue* return_value = environment()->Pop();
-      current_block()->AddLeaveInlined(return_value, function_return());
+      current_block()->AddLeaveInlined(return_value,
+                                       function_return(),
+                                       function_state()->drop_extra());
     }
     set_current_block(NULL);
   }
@@ -2669,43 +2709,98 @@
     return Bailout("SwitchStatement: too many clauses");
   }
 
+  HValue* context = environment()->LookupContext();
+
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
 
-  // 1. Build all the tests, with dangling true branches.  Unconditionally
-  // deoptimize if we encounter a non-smi comparison.
+  SwitchType switch_type = UNKNOWN_SWITCH;
+
+  // 1. Extract clause type
   for (int i = 0; i < clause_count; ++i) {
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
-    if (!clause->label()->IsSmiLiteral()) {
-      return Bailout("SwitchStatement: non-literal switch label");
+
+    if (switch_type == UNKNOWN_SWITCH) {
+      if (clause->label()->IsSmiLiteral()) {
+        switch_type = SMI_SWITCH;
+      } else if (clause->label()->IsStringLiteral()) {
+        switch_type = STRING_SWITCH;
+      } else {
+        return Bailout("SwitchStatement: non-literal switch label");
+      }
+    } else if ((switch_type == STRING_SWITCH &&
+                !clause->label()->IsStringLiteral()) ||
+               (switch_type == SMI_SWITCH &&
+                !clause->label()->IsSmiLiteral())) {
+      return Bailout("SwitchStatemnt: mixed label types are not supported");
+    }
+  }
+
+  HUnaryControlInstruction* string_check = NULL;
+  HBasicBlock* not_string_block = NULL;
+
+  // Test switch's tag value if all clauses are string literals
+  if (switch_type == STRING_SWITCH) {
+    string_check = new(zone()) HIsStringAndBranch(tag_value);
+    first_test_block = graph()->CreateBasicBlock();
+    not_string_block = graph()->CreateBasicBlock();
+
+    string_check->SetSuccessorAt(0, first_test_block);
+    string_check->SetSuccessorAt(1, not_string_block);
+    current_block()->Finish(string_check);
+
+    set_current_block(first_test_block);
+  }
+
+  // 2. Build all the tests, with dangling true branches
+  int default_id = AstNode::kNoNumber;
+  for (int i = 0; i < clause_count; ++i) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) {
+      default_id = clause->EntryId();
+      continue;
+    }
+    if (switch_type == SMI_SWITCH) {
+      clause->RecordTypeFeedback(oracle());
     }
 
-    // Unconditionally deoptimize on the first non-smi compare.
-    clause->RecordTypeFeedback(oracle());
-    if (!clause->IsSmiCompare()) {
-      // Finish with deoptimize and add uses of enviroment values to
-      // account for invisible uses.
-      current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-      set_current_block(NULL);
-      break;
-    }
-
-    // Otherwise generate a compare and branch.
+    // Generate a compare and branch.
     CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
-    HCompareIDAndBranch* compare =
-        new(zone()) HCompareIDAndBranch(tag_value,
-                                        label_value,
-                                        Token::EQ_STRICT);
-    compare->SetInputRepresentation(Representation::Integer32());
-    HBasicBlock* body_block = graph()->CreateBasicBlock();
+
     HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+    HBasicBlock* body_block = graph()->CreateBasicBlock();
+
+    HControlInstruction* compare;
+
+    if (switch_type == SMI_SWITCH) {
+      if (!clause->IsSmiCompare()) {
+        // Finish with deoptimize and add uses of environment values to
+        // account for invisible uses.
+        current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+        set_current_block(NULL);
+        break;
+      }
+
+      HCompareIDAndBranch* compare_ =
+          new(zone()) HCompareIDAndBranch(tag_value,
+                                          label_value,
+                                          Token::EQ_STRICT);
+      compare_->SetInputRepresentation(Representation::Integer32());
+      compare = compare_;
+    } else {
+      compare = new(zone()) HStringCompareAndBranch(context, tag_value,
+                                                     label_value,
+                                                     Token::EQ_STRICT);
+    }
+
     compare->SetSuccessorAt(0, body_block);
     compare->SetSuccessorAt(1, next_test_block);
     current_block()->Finish(compare);
+
     set_current_block(next_test_block);
   }
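
The clause scan above first classifies the whole switch as all-smi or
all-string and bails out on mixed or non-literal labels; only then is the
compare-and-branch chain built. A toy sketch of just the classification step
(hypothetical label kinds, not the AST types):

  #include <cassert>

  enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, BAIL_OUT };
  enum LabelKind { SMI_LABEL, STRING_LABEL, OTHER_LABEL, DEFAULT_CLAUSE };

  static SwitchType Classify(const LabelKind* labels, int count) {
    SwitchType type = UNKNOWN_SWITCH;
    for (int i = 0; i < count; ++i) {
      if (labels[i] == DEFAULT_CLAUSE) continue;
      SwitchType wanted;
      if (labels[i] == SMI_LABEL) wanted = SMI_SWITCH;
      else if (labels[i] == STRING_LABEL) wanted = STRING_SWITCH;
      else return BAIL_OUT;                      // non-literal label
      if (type == UNKNOWN_SWITCH) type = wanted;
      else if (type != wanted) return BAIL_OUT;  // mixed label kinds
    }
    return type;
  }

  int main() {
    LabelKind smi[] = { SMI_LABEL, DEFAULT_CLAUSE, SMI_LABEL };
    LabelKind str[] = { STRING_LABEL, STRING_LABEL };
    LabelKind mixed[] = { SMI_LABEL, STRING_LABEL };
    assert(Classify(smi, 3) == SMI_SWITCH);
    assert(Classify(str, 2) == STRING_SWITCH);
    assert(Classify(mixed, 2) == BAIL_OUT);
    return 0;
  }
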
 
@@ -2713,10 +2808,18 @@
   // exit.  This block is NULL if we deoptimized.
   HBasicBlock* last_block = current_block();
 
-  // 2. Loop over the clauses and the linked list of tests in lockstep,
+  if (not_string_block != NULL) {
+    int join_id = (default_id != AstNode::kNoNumber)
+        ? default_id
+        : stmt->ExitId();
+    last_block = CreateJoin(last_block, not_string_block, join_id);
+  }
+
+  // 3. Loop over the clauses and the linked list of tests in lockstep,
   // translating the clause bodies.
   HBasicBlock* curr_test_block = first_test_block;
   HBasicBlock* fall_through_block = NULL;
+
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
     for (int i = 0; i < clause_count; ++i) {
@@ -3125,12 +3228,22 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   Variable* variable = expr->var();
-  if (variable->mode() == Variable::LET) {
+  if (variable->mode() == LET) {
     return Bailout("reference to let variable");
   }
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
-      LookupResult lookup;
+      // Handle known global constants like 'undefined' specially to avoid a
+      // load from a global cell for them.
+      Handle<Object> constant_value =
+          isolate()->factory()->GlobalConstantFor(variable->name());
+      if (!constant_value.is_null()) {
+        HConstant* instr =
+            new(zone()) HConstant(constant_value, Representation::Tagged());
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      }
+
+      LookupResult lookup(isolate());
       GlobalPropertyAccess type =
           LookupGlobalProperty(variable, &lookup, false);
 
@@ -3142,8 +3255,8 @@
       if (type == kUseCell) {
         Handle<GlobalObject> global(info()->global_object());
         Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-        bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-        HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+        HLoadGlobalCell* instr =
+            new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
         return ast_context()->ReturnInstruction(instr, expr->id());
       } else {
         HValue* context = environment()->LookupContext();
@@ -3162,7 +3275,7 @@
     case Variable::PARAMETER:
     case Variable::LOCAL: {
       HValue* value = environment()->Lookup(variable);
-      if (variable->mode() == Variable::CONST &&
+      if (variable->mode() == CONST &&
           value == graph()->GetConstantHole()) {
         return Bailout("reference to uninitialized const variable");
       }
@@ -3170,7 +3283,7 @@
     }
 
     case Variable::CONTEXT: {
-      if (variable->mode() == Variable::CONST) {
+      if (variable->mode() == CONST) {
         return Bailout("reference to const context slot");
       }
       HValue* context = BuildContextChainWalk(variable);
@@ -3209,18 +3322,78 @@
 }
 
 
+// Determines whether the given object literal boilerplate satisfies all
+// limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+static bool IsFastObjectLiteral(Handle<JSObject> boilerplate,
+                                int max_depth,
+                                int* max_properties,
+                                int* total_size) {
+  if (max_depth <= 0) return false;
+
+  FixedArrayBase* elements = boilerplate->elements();
+  if (elements->length() > 0 &&
+      elements->map() != HEAP->fixed_cow_array_map()) {
+    return false;
+  }
+
+  FixedArray* properties = boilerplate->properties();
+  if (properties->length() > 0) {
+    return false;
+  } else {
+    int nof = boilerplate->map()->inobject_properties();
+    for (int i = 0; i < nof; i++) {
+      if ((*max_properties)-- <= 0) return false;
+      Handle<Object> value(boilerplate->InObjectPropertyAt(i));
+      if (value->IsJSObject()) {
+        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+        if (!IsFastObjectLiteral(value_object,
+                                 max_depth - 1,
+                                 max_properties,
+                                 total_size)) {
+          return false;
+        }
+      }
+    }
+  }
+
+  *total_size += boilerplate->map()->instance_size();
+  return true;
+}
+
+
 void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
+  Handle<JSFunction> closure = function_state()->compilation_info()->closure();
   HValue* context = environment()->LookupContext();
-  HObjectLiteral* literal =
-      new(zone()) HObjectLiteral(context,
-                                 expr->constant_properties(),
-                                 expr->fast_elements(),
-                                 expr->literal_index(),
-                                 expr->depth(),
-                                 expr->has_function());
+  HInstruction* literal;
+
+  // Check whether to use fast or slow deep-copying for boilerplate.
+  int total_size = 0;
+  int max_properties = HObjectLiteralFast::kMaxObjectLiteralProperties;
+  Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
+  if (boilerplate->IsJSObject() &&
+      IsFastObjectLiteral(Handle<JSObject>::cast(boilerplate),
+                          HObjectLiteralFast::kMaxObjectLiteralDepth,
+                          &max_properties,
+                          &total_size)) {
+    Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
+    literal = new(zone()) HObjectLiteralFast(context,
+                                             boilerplate_object,
+                                             total_size,
+                                             expr->literal_index(),
+                                             expr->depth());
+  } else {
+    literal = new(zone()) HObjectLiteralGeneric(context,
+                                                expr->constant_properties(),
+                                                expr->fast_elements(),
+                                                expr->literal_index(),
+                                                expr->depth(),
+                                                expr->has_function());
+  }
+
   // The object is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
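
Note that max_properties in IsFastObjectLiteral is a single budget shared
across the whole boilerplate graph, decremented at every depth, and that
total_size accumulates children before their parent. A standalone sketch of
the same walk over a toy object tree (object-valued properties only, unlike
the real predicate, which also counts plain in-object properties):

  #include <cassert>
  #include <vector>

  // Toy boilerplate object: an instance size plus object-valued properties.
  struct Obj {
    int instance_size;
    std::vector<Obj> properties;
  };

  static bool IsFastLiteral(const Obj& boilerplate, int max_depth,
                            int* max_properties, int* total_size) {
    if (max_depth <= 0) return false;
    for (size_t i = 0; i < boilerplate.properties.size(); ++i) {
      if ((*max_properties)-- <= 0) return false;  // shared budget
      if (!IsFastLiteral(boilerplate.properties[i], max_depth - 1,
                         max_properties, total_size)) {
        return false;
      }
    }
    *total_size += boilerplate.instance_size;  // children counted first
    return true;
  }

  int main() {
    Obj leaf; leaf.instance_size = 8;
    Obj root; root.instance_size = 16;
    root.properties.push_back(leaf);
    root.properties.push_back(leaf);
    int budget = 8, size = 0;
    assert(IsFastLiteral(root, 2, &budget, &size) && size == 32);
    int tiny = 1; size = 0;
    assert(!IsFastLiteral(root, 2, &tiny, &size));  // budget exhausted
    return 0;
  }
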
@@ -3250,7 +3423,7 @@
                                 literal,
                                 name,
                                 value,
-                                function_strict_mode());
+                                function_strict_mode_flag());
             AddInstruction(store);
             AddSimulate(key->id());
           } else {
@@ -3311,16 +3484,49 @@
     HValue* value = Pop();
     if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
 
-    // Load the elements array before the first store.
-    if (elements == NULL)  {
-      elements = new(zone()) HLoadElements(literal);
-      AddInstruction(elements);
-    }
+    elements = new(zone()) HLoadElements(literal);
+    AddInstruction(elements);
 
     HValue* key = AddInstruction(
         new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
                               Representation::Integer32()));
+    HInstruction* elements_kind =
+        AddInstruction(new(zone()) HElementsKind(literal));
+    HBasicBlock* store_fast = graph()->CreateBasicBlock();
+    // Two empty blocks to satisfy edge split form.
+    HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
+    HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
+    HBasicBlock* store_generic = graph()->CreateBasicBlock();
+    HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
+    HBasicBlock* join = graph()->CreateBasicBlock();
+
+    HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
+    smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
+    smicheck->SetSuccessorAt(1, check_smi_only_elements);
+    current_block()->Finish(smicheck);
+    store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
+
+    set_current_block(check_smi_only_elements);
+    HCompareConstantEqAndBranch* smi_elements_check =
+        new(zone()) HCompareConstantEqAndBranch(elements_kind,
+                                                FAST_ELEMENTS,
+                                                Token::EQ_STRICT);
+    smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
+    smi_elements_check->SetSuccessorAt(1, store_generic);
+    current_block()->Finish(smi_elements_check);
+    store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
+
+    set_current_block(store_fast);
     AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
+    store_fast->Goto(join);
+
+    set_current_block(store_generic);
+    AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
+    store_generic->Goto(join);
+
+    join->SetJoinId(expr->id());
+    set_current_block(join);
+
     AddSimulate(expr->GetIdForElement(i));
   }
   return ast_context()->ReturnValue(Pop());
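
Each element store above is guarded dynamically: smi values always take the
fast store, non-smi values take it only once the backing store is already
FAST_ELEMENTS, and everything else falls back to the generic keyed store. A
toy decision-table sketch of that control flow (simplified kinds, hypothetical
StorePath helper):

  #include <cstdio>

  // Toy mirror of the per-element store decision.
  enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

  static const char* StorePath(bool value_is_smi, ElementsKind kind) {
    if (value_is_smi) return "fast store";           // store_fast block
    if (kind == FAST_ELEMENTS) return "fast store";  // non-smi already legal
    return "generic store";                          // store_generic block
  }

  int main() {
    std::printf("%s\n", StorePath(true, FAST_SMI_ONLY_ELEMENTS));   // fast
    std::printf("%s\n", StorePath(false, FAST_SMI_ONLY_ELEMENTS));  // generic
    std::printf("%s\n", StorePath(false, FAST_ELEMENTS));           // fast
    return 0;
  }
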
@@ -3395,7 +3601,7 @@
                          object,
                          name,
                          value,
-                         function_strict_mode());
+                         function_strict_mode_flag());
 }
 
 
@@ -3409,7 +3615,7 @@
   Handle<String> name = Handle<String>::cast(key->handle());
   ASSERT(!name.is_null());
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   SmallMapList* types = expr->GetReceiverTypes();
   bool is_monomorphic = expr->IsMonomorphic() &&
       ComputeStoredField(types->first(), name, &lookup);
@@ -3433,7 +3639,7 @@
   HBasicBlock* join = NULL;
   for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
     Handle<Map> map = types->at(i);
-    LookupResult lookup;
+    LookupResult lookup(isolate());
     if (ComputeStoredField(map, name, &lookup)) {
       if (count == 0) {
         AddInstruction(new(zone()) HCheckNonSmi(object));  // Only needed once.
@@ -3476,7 +3682,7 @@
       // The HSimulate for the store should not see the stored value in
       // effect contexts (it is not materialized at expr->id() in the
       // unoptimized code).
-      if (instr->HasSideEffects()) {
+      if (instr->HasObservableSideEffects()) {
         if (ast_context()->IsEffect()) {
           AddSimulate(expr->id());
         } else {
@@ -3516,7 +3722,7 @@
     ASSERT(!name.is_null());
 
     SmallMapList* types = expr->GetReceiverTypes();
-    LookupResult lookup;
+    LookupResult lookup(isolate());
 
     if (expr->IsMonomorphic()) {
       instr = BuildStoreNamed(object, value, expr);
@@ -3549,7 +3755,7 @@
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
-  if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+  if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
   return ast_context()->ReturnValue(Pop());
 }
 
@@ -3561,16 +3767,16 @@
                                                    HValue* value,
                                                    int position,
                                                    int ast_id) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
-    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
     Handle<GlobalObject> global(info()->global_object());
     Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
+    HInstruction* instr =
+        new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
     instr->set_position(position);
     AddInstruction(instr);
-    if (instr->HasSideEffects()) AddSimulate(ast_id);
+    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
   } else {
     HValue* context =  environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -3580,11 +3786,11 @@
                                         global_object,
                                         var->name(),
                                         value,
-                                        function_strict_mode());
+                                        function_strict_mode_flag());
     instr->set_position(position);
     AddInstruction(instr);
-    ASSERT(instr->HasSideEffects());
-    if (instr->HasSideEffects()) AddSimulate(ast_id);
+    ASSERT(instr->HasObservableSideEffects());
+    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
   }
 }
 
@@ -3601,7 +3807,7 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == Variable::CONST || var->mode() == Variable::LET)  {
+    if (var->mode() == CONST || var->mode() == LET) {
       return Bailout("unsupported let or const compound assignment");
     }
 
@@ -3641,7 +3847,9 @@
         HStoreContextSlot* instr =
             new(zone()) HStoreContextSlot(context, var->index(), Top());
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         break;
       }
 
@@ -3667,7 +3875,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+      if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
 
       CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
@@ -3675,14 +3883,14 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasSideEffects()) AddSimulate(operation->id());
+      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
 
       HInstruction* store = BuildStoreNamed(obj, instr, prop);
       AddInstruction(store);
       // Drop the simulated receiver and value.  Return the value.
       Drop(2);
       Push(instr);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
       return ast_context()->ReturnValue(Pop());
 
     } else {
@@ -3707,7 +3915,7 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasSideEffects()) AddSimulate(operation->id());
+      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
 
       expr->RecordTypeFeedback(oracle());
       HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -3746,7 +3954,7 @@
     HandlePropertyAssignment(expr);
   } else if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == Variable::CONST) {
+    if (var->mode() == CONST) {
       if (expr->op() != Token::INIT_CONST) {
         return Bailout("non-initializer assignment to const");
       }
@@ -3757,7 +3965,7 @@
       // variables (e.g. initialization inside a loop).
       HValue* old_value = environment()->Lookup(var);
       AddInstruction(new HUseConst(old_value));
-    } else if (var->mode() == Variable::LET) {
+    } else if (var->mode() == LET) {
       return Bailout("unsupported assignment to let");
     }
 
@@ -3785,7 +3993,7 @@
       }
 
       case Variable::CONTEXT: {
-        ASSERT(var->mode() != Variable::CONST);
+        ASSERT(var->mode() != CONST);
         // Bail out if we try to mutate a parameter value in a function using
         // the arguments object.  We do not (yet) correctly handle the
         // arguments property of the function.
@@ -3805,7 +4013,9 @@
         HStoreContextSlot* instr =
             new(zone()) HStoreContextSlot(context, var->index(), Top());
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         return ast_context()->ReturnValue(Pop());
       }
 
@@ -3876,7 +4086,7 @@
                                             Property* expr,
                                             Handle<Map> map,
                                             Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   map->LookupInDescriptors(NULL, *name, &lookup);
   if (lookup.IsProperty() && lookup.type() == FIELD) {
     return BuildLoadNamedField(obj,
@@ -3931,6 +4141,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
         break;
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3947,24 +4158,48 @@
 }
 
 
+HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
+                                                    HValue* checked_key,
+                                                    HValue* val,
+                                                    ElementsKind elements_kind,
+                                                    bool is_store) {
+  if (is_store) {
+    ASSERT(val != NULL);
+    if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+      return new(zone()) HStoreKeyedFastDoubleElement(
+          elements, checked_key, val);
+    } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+      return new(zone()) HStoreKeyedFastElement(
+          elements, checked_key, val, elements_kind);
+    }
+  }
+  // It's an element load (!is_store).
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+  } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+    return new(zone()) HLoadKeyedFastElement(elements, checked_key);
+  }
+}
+
+
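
BuildFastElementAccess centralizes a four-way choice (load vs. store, double
vs. non-double backing store) that was previously spelled out at each call
site. A toy sketch of the same dispatch, returning instruction names instead
of allocating HIR nodes:

  #include <cassert>
  #include <cstring>

  enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
                      FAST_DOUBLE_ELEMENTS };

  // One place decides among the four fast-element instructions.
  static const char* FastElementAccess(ElementsKind kind, bool is_store) {
    if (is_store) {
      return kind == FAST_DOUBLE_ELEMENTS ? "StoreKeyedFastDoubleElement"
                                          : "StoreKeyedFastElement";
    }
    return kind == FAST_DOUBLE_ELEMENTS ? "LoadKeyedFastDoubleElement"
                                        : "LoadKeyedFastElement";
  }

  int main() {
    assert(!std::strcmp(FastElementAccess(FAST_SMI_ONLY_ELEMENTS, true),
                        "StoreKeyedFastElement"));
    assert(!std::strcmp(FastElementAccess(FAST_DOUBLE_ELEMENTS, false),
                        "LoadKeyedFastDoubleElement"));
    return 0;
  }
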
 HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
                                                            HValue* key,
                                                            HValue* val,
-                                                           Expression* expr,
+                                                           Handle<Map> map,
                                                            bool is_store) {
-  ASSERT(expr->IsMonomorphic());
-  Handle<Map> map = expr->GetMonomorphicReceiverType();
-  if (!map->has_fast_elements() &&
-      !map->has_fast_double_elements() &&
+  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+  bool fast_smi_only_elements = map->has_fast_smi_only_elements();
+  bool fast_elements = map->has_fast_elements();
+  bool fast_double_elements = map->has_fast_double_elements();
+  if (!fast_smi_only_elements &&
+      !fast_elements &&
+      !fast_double_elements &&
       !map->has_external_array_elements()) {
     return is_store ? BuildStoreKeyedGeneric(object, key, val)
                     : BuildLoadKeyedGeneric(object, key);
   }
-  AddInstruction(new(zone()) HCheckNonSmi(object));
-  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
-  bool fast_double_elements = map->has_fast_double_elements();
-  if (is_store && map->has_fast_elements()) {
+  if (is_store && (fast_elements || fast_smi_only_elements)) {
     AddInstruction(new(zone()) HCheckMap(
         elements, isolate()->factory()->fixed_array_map()));
   }
@@ -3979,28 +4214,15 @@
     return BuildExternalArrayElementAccess(external_elements, checked_key,
                                            val, map->elements_kind(), is_store);
   }
-  ASSERT(map->has_fast_elements() || fast_double_elements);
+  ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
   if (map->instance_type() == JS_ARRAY_TYPE) {
     length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
   } else {
     length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
   }
   checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-  if (is_store) {
-    if (fast_double_elements) {
-      return new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                      checked_key,
-                                                      val);
-    } else {
-      return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
-    }
-  } else {
-    if (fast_double_elements) {
-      return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
-    } else {
-      return new(zone()) HLoadKeyedFastElement(elements, checked_key);
-    }
-  }
+  return BuildFastElementAccess(elements, checked_key, val,
+                                map->elements_kind(), is_store);
 }
 
 
@@ -4014,7 +4236,6 @@
                                                       bool* has_side_effects) {
   *has_side_effects = false;
   AddInstruction(new(zone()) HCheckNonSmi(object));
-  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
   SmallMapList* maps = prop->GetReceiverTypes();
   bool todo_external_array = false;
 
@@ -4024,15 +4245,55 @@
     type_todo[i] = false;
   }
 
+  // ElementsKind transition support.
+  MapHandleList transition_target(maps->length());
+  // Collect possible transition targets.
+  MapHandleList possible_transitioned_maps(maps->length());
   for (int i = 0; i < maps->length(); ++i) {
-    ASSERT(maps->at(i)->IsMap());
-    type_todo[maps->at(i)->elements_kind()] = true;
-    if (maps->at(i)->elements_kind()
-        >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
-      todo_external_array = true;
+    Handle<Map> map = maps->at(i);
+    ElementsKind elements_kind = map->elements_kind();
+    if (elements_kind == FAST_DOUBLE_ELEMENTS ||
+        elements_kind == FAST_ELEMENTS) {
+      possible_transitioned_maps.Add(map);
+    }
+  }
+  // Get transition target for each map (NULL == no transition).
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    Handle<Map> transitioned_map =
+        map->FindTransitionedMap(&possible_transitioned_maps);
+    transition_target.Add(transitioned_map);
+  }
+
+  int num_untransitionable_maps = 0;
+  Handle<Map> untransitionable_map;
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    ASSERT(map->IsMap());
+    if (!transition_target.at(i).is_null()) {
+      object = AddInstruction(new(zone()) HTransitionElementsKind(
+          object, map, transition_target.at(i)));
+    } else {
+      type_todo[map->elements_kind()] = true;
+      if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+        todo_external_array = true;
+      }
+      num_untransitionable_maps++;
+      untransitionable_map = map;
     }
   }
 
+  // If only one map is left after transitioning, handle this case
+  // monomorphically.
+  if (num_untransitionable_maps == 1) {
+    HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
+        object, key, val, untransitionable_map, is_store));
+    *has_side_effects |= instr->HasObservableSideEffects();
+    instr->set_position(position);
+    return is_store ? NULL : instr;
+  }
+
+  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
   HBasicBlock* join = graph()->CreateBasicBlock();
 
   HInstruction* elements_kind_instr =
@@ -4042,14 +4303,20 @@
   HLoadExternalArrayPointer* external_elements = NULL;
   HInstruction* checked_key = NULL;
 
-  // FAST_ELEMENTS is assumed to be the first case.
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
+  // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
+  // arrays.
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
 
-  for (ElementsKind elements_kind = FAST_ELEMENTS;
+  for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
        elements_kind <= LAST_ELEMENTS_KIND;
        elements_kind = ElementsKind(elements_kind + 1)) {
-    // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
-    // need to add some code that's executed for all external array cases.
+    // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
+    // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
+    // that's executed for all external array cases.
     STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
                   LAST_ELEMENTS_KIND);
     if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -4071,15 +4338,25 @@
 
       set_current_block(if_true);
       HInstruction* access;
-      if (elements_kind == FAST_ELEMENTS ||
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_ELEMENTS ||
           elements_kind == FAST_DOUBLE_ELEMENTS) {
-        bool fast_double_elements =
-            elements_kind == FAST_DOUBLE_ELEMENTS;
-        if (is_store && elements_kind == FAST_ELEMENTS) {
+        if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+          AddInstruction(new(zone()) HCheckSmi(val));
+        }
+        if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
           AddInstruction(new(zone()) HCheckMap(
               elements, isolate()->factory()->fixed_array_map(),
               elements_kind_branch));
         }
+        // TODO(jkummerow): The need for these two blocks could be avoided
+        // in one of two ways:
+        // (1) Introduce ElementsKinds for JSArrays that are distinct from
+        //     those for fast objects.
+        // (2) Put the common instructions into a third "join" block. This
+        //     requires additional AST IDs that we can deopt to from inside
+        //     that join block. They must be added to the Property class (when
+        //     it's a keyed property) and registered in the full codegen.
         HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
         HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
         HHasInstanceTypeAndBranch* typecheck =
@@ -4089,30 +4366,16 @@
         current_block()->Finish(typecheck);
 
         set_current_block(if_jsarray);
-        HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
-        AddInstruction(length);
+        HInstruction* length;
+        length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        if (is_store) {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                         checked_key,
-                                                         val));
-          } else {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
-          }
-        } else {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
-          } else {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastElement(elements, checked_key));
-          }
+        access = AddInstruction(BuildFastElementAccess(
+            elements, checked_key, val, elements_kind, is_store));
+        if (!is_store) {
           Push(access);
         }
-        *has_side_effects |= access->HasSideEffects();
+
+        *has_side_effects |= access->HasObservableSideEffects();
         if (position != -1) {
           access->set_position(position);
         }
@@ -4121,25 +4384,8 @@
         set_current_block(if_fastobject);
         length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        if (is_store) {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                         checked_key,
-                                                         val));
-          } else {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
-          }
-        } else {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
-          } else {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastElement(elements, checked_key));
-          }
-        }
+        access = AddInstruction(BuildFastElementAccess(
+            elements, checked_key, val, elements_kind, is_store));
       } else if (elements_kind == DICTIONARY_ELEMENTS) {
         if (is_store) {
           access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -4150,7 +4396,7 @@
         access = AddInstruction(BuildExternalArrayElementAccess(
             external_elements, checked_key, val, elements_kind, is_store));
       }
-      *has_side_effects |= access->HasSideEffects();
+      *has_side_effects |= access->HasObservableSideEffects();
       access->set_position(position);
       if (!is_store) {
         Push(access);
@@ -4179,7 +4425,9 @@
   ASSERT(!expr->IsPropertyName());
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
-    instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
+    Handle<Map> map = expr->GetMonomorphicReceiverType();
+    AddInstruction(new(zone()) HCheckNonSmi(obj));
+    instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
   } else if (expr->GetReceiverTypes() != NULL &&
              !expr->GetReceiverTypes()->is_empty()) {
     return HandlePolymorphicElementAccess(
@@ -4193,7 +4441,7 @@
   }
   instr->set_position(position);
   AddInstruction(instr);
-  *has_side_effects = instr->HasSideEffects();
+  *has_side_effects = instr->HasObservableSideEffects();
   return instr;
 }
 
@@ -4207,7 +4455,7 @@
                          object,
                          key,
                          value,
-                         function_strict_mode());
+                         function_strict_mode_flag());
 }
 
 bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
@@ -4260,7 +4508,7 @@
   CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
-  if (expr->IsArrayLength()) {
+  if (expr->AsProperty()->IsArrayLength()) {
     HValue* array = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(array));
     HInstruction* mapcheck =
@@ -4449,7 +4697,7 @@
 }
 
 
-bool HGraphBuilder::TryInline(Call* expr) {
+bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
   if (!FLAG_use_inlining) return false;
 
   // The function call we are inlining is a method call if the call
@@ -4477,7 +4725,8 @@
     return false;
   }
 
-  // No context change required.
+#if !defined(V8_TARGET_ARCH_IA32)
+  // Target must be able to use caller's context.
   CompilationInfo* outer_info = info();
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
@@ -4485,6 +4734,8 @@
     TraceInline(target, caller, "target requires context change");
     return false;
   }
+#endif
+
 
   // Don't inline deeper than kMaxInliningLevels calls.
   HEnvironment* env = environment();
@@ -4499,9 +4750,13 @@
   }
 
   // Don't inline recursive functions.
-  if (*target_shared == outer_info->closure()->shared()) {
-    TraceInline(target, caller, "target is recursive");
-    return false;
+  for (FunctionState* state = function_state();
+       state != NULL;
+       state = state->outer()) {
+    if (state->compilation_info()->closure()->shared() == *target_shared) {
+      TraceInline(target, caller, "target is recursive");
+      return false;
+    }
   }
 
   // We don't want to add more than a certain number of nodes from inlining.
@@ -4514,7 +4769,7 @@
 
   // Parse and allocate variables.
   CompilationInfo target_info(target);
-  if (!ParserApi::Parse(&target_info) ||
+  if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
       !Scope::Analyze(&target_info)) {
     if (target_info.isolate()->has_pending_exception()) {
       // Parse or scope error, never optimize this function.
@@ -4574,11 +4829,11 @@
       TraceInline(target, caller, "could not generate deoptimization info");
       return false;
     }
-    if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+    if (target_shared->scope_info() == ScopeInfo::Empty()) {
       // The scope info might not have been set if a lazily compiled
       // function is inlined before being called for the first time.
-      Handle<SerializedScopeInfo> target_scope_info =
-          SerializedScopeInfo::Create(target_info.scope());
+      Handle<ScopeInfo> target_scope_info =
+          ScopeInfo::Create(target_info.scope());
       target_shared->set_scope_info(*target_scope_info);
     }
     target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -4596,8 +4851,12 @@
   ASSERT(target_shared->has_deoptimization_support());
   TypeFeedbackOracle target_oracle(
       Handle<Code>(target_shared->code()),
-      Handle<Context>(target->context()->global_context()));
-  FunctionState target_state(this, &target_info, &target_oracle);
+      Handle<Context>(target->context()->global_context()),
+      isolate());
+  // The function state is heap-allocated because we need to delete it
+  // in two different places.
+  FunctionState* target_state =
+      new FunctionState(this, &target_info, &target_oracle, drop_extra);
 
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner_env =
@@ -4605,6 +4864,17 @@
                                      function,
                                      undefined,
                                      call_kind);
+#ifdef V8_TARGET_ARCH_IA32
+  // IA32 only, overwrite the caller's context in the deoptimization
+  // environment with the correct one.
+  //
+  // TODO(kmillikin): implement the same inlining on other platforms so we
+  // can remove the unsightly ifdefs in this function.
+  HConstant* context = new HConstant(Handle<Context>(target->context()),
+                                     Representation::Tagged());
+  AddInstruction(context);
+  inner_env->BindContext(context);
+#endif
   HBasicBlock* body_entry = CreateBasicBlock(inner_env);
   current_block()->Goto(body_entry);
   body_entry->SetJoinId(expr->ReturnId());
@@ -4620,6 +4890,7 @@
     TraceInline(target, caller, "inline graph construction failed");
     target_shared->DisableOptimization(*target);
     inline_bailout_ = true;
+    delete target_state;
     return true;
   }
 
@@ -4635,9 +4906,11 @@
       ASSERT(function_return() != NULL);
       ASSERT(call_context()->IsEffect() || call_context()->IsValue());
       if (call_context()->IsEffect()) {
-        current_block()->Goto(function_return());
+        current_block()->Goto(function_return(), drop_extra);
       } else {
-        current_block()->AddLeaveInlined(undefined, function_return());
+        current_block()->AddLeaveInlined(undefined,
+                                         function_return(),
+                                         drop_extra);
       }
     } else {
       // The graph builder assumes control can reach both branches of a
@@ -4645,13 +4918,14 @@
       // simply jumping to the false target.
       //
       // TODO(3168478): refactor to avoid this.
+      ASSERT(call_context()->IsTest());
       HBasicBlock* empty_true = graph()->CreateBasicBlock();
       HBasicBlock* empty_false = graph()->CreateBasicBlock();
       HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
       current_block()->Finish(test);
 
-      empty_true->Goto(inlined_test_context()->if_true());
-      empty_false->Goto(inlined_test_context()->if_false());
+      empty_true->Goto(inlined_test_context()->if_true(), drop_extra);
+      empty_false->Goto(inlined_test_context()->if_false(), drop_extra);
     }
   }
 
@@ -4663,19 +4937,21 @@
     // Pop the return test context from the expression context stack.
     ASSERT(ast_context() == inlined_test_context());
     ClearInlinedTestContext();
+    delete target_state;
 
     // Forward to the real test context.
     if (if_true->HasPredecessor()) {
       if_true->SetJoinId(expr->id());
       HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-      if_true->Goto(true_target);
+      if_true->Goto(true_target, function_state()->drop_extra());
     }
     if (if_false->HasPredecessor()) {
       if_false->SetJoinId(expr->id());
       HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-      if_false->Goto(false_target);
+      if_false->Goto(false_target, function_state()->drop_extra());
     }
     set_current_block(NULL);
+    return true;
 
   } else if (function_return()->HasPredecessor()) {
     function_return()->SetJoinId(expr->id());
@@ -4683,7 +4959,7 @@
   } else {
     set_current_block(NULL);
   }
-
+  delete target_state;
   return true;
 }
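
The recursion check above was strengthened from comparing only the immediate
caller to walking every FunctionState on the inlining chain, so mutual
recursion through an already-inlined frame is rejected as well. A standalone
sketch of that walk (toy state struct, integer ids standing in for shared
function infos):

  #include <cassert>
  #include <cstddef>

  // Toy function state chain, innermost first.
  struct FunctionState {
    int function_id;
    const FunctionState* outer;
  };

  static bool IsRecursiveInline(const FunctionState* state, int target_id) {
    for (; state != NULL; state = state->outer) {
      if (state->function_id == target_id) return true;
    }
    return false;
  }

  int main() {
    FunctionState top = { 1, NULL };    // outermost compiled function
    FunctionState inner = { 2, &top };  // already-inlined callee
    assert(IsRecursiveInline(&inner, 1));   // mutual recursion via chain
    assert(IsRecursiveInline(&inner, 2));   // direct self recursion
    assert(!IsRecursiveInline(&inner, 3));  // fresh target is fine
    return 0;
  }
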
 
@@ -4764,7 +5040,7 @@
             AddInstruction(square_root);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
-            ASSERT(!square_root->HasSideEffects());
+            ASSERT(!square_root->HasObservableSideEffects());
             result = new(zone()) HDiv(context, double_one, square_root);
           } else if (exponent == 2.0) {
             result = new(zone()) HMul(context, left, left);
@@ -4897,7 +5173,7 @@
         return;
       }
 
-      if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
+      if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
           expr->check_type() != RECEIVER_MAP_CHECK) {
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
@@ -4925,8 +5201,8 @@
     }
 
   } else {
+    expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
-    // FIXME.
     bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
 
     if (global_call) {
@@ -4935,7 +5211,7 @@
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
       // and generate optimized code for calling the function.
-      LookupResult lookup;
+      LookupResult lookup(isolate());
       GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
       if (type == kUseCell &&
           !info()->global_object()->IsAccessCheckNeeded()) {
@@ -4978,8 +5254,30 @@
         Drop(argument_count);
       }
 
+    } else if (expr->IsMonomorphic()) {
+      // The function is on the stack in the unoptimized code during
+      // evaluation of the arguments.
+      CHECK_ALIVE(VisitForValue(expr->expression()));
+      HValue* function = Top();
+      HValue* context = environment()->LookupContext();
+      HGlobalObject* global = new(zone()) HGlobalObject(context);
+      HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
+      AddInstruction(global);
+      PushAndAdd(receiver);
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
+      AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+      if (TryInline(expr, true)) {   // Drop function from environment.
+        return;
+      } else {
+        call = PreProcessCall(new(zone()) HInvokeFunction(context,
+                                                          function,
+                                                          argument_count));
+        Drop(1);  // The function.
+      }
+
     } else {
-      CHECK_ALIVE(VisitArgument(expr->expression()));
+      CHECK_ALIVE(VisitForValue(expr->expression()));
+      HValue* function = Top();
       HValue* context = environment()->LookupContext();
       HGlobalObject* global_object = new(zone()) HGlobalObject(context);
       HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
@@ -4988,9 +5286,7 @@
       PushAndAdd(new(zone()) HPushArgument(receiver));
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      // The function to call is treated as an argument to the call function
-      // stub.
-      call = new(zone()) HCallFunction(context, argument_count + 1);
+      call = new(zone()) HCallFunction(context, function, argument_count);
       Drop(argument_count + 1);
     }
   }
@@ -5185,7 +5481,6 @@
 
 
 void HGraphBuilder::VisitNot(UnaryOperation* expr) {
-  // TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
@@ -5207,7 +5502,7 @@
                                 materialize_true));
 
   if (materialize_false->HasPredecessor()) {
-    materialize_false->SetJoinId(expr->expression()->id());
+    materialize_false->SetJoinId(expr->MaterializeFalseId());
     set_current_block(materialize_false);
     Push(graph()->GetConstantFalse());
   } else {
@@ -5215,7 +5510,7 @@
   }
 
   if (materialize_true->HasPredecessor()) {
-    materialize_true->SetJoinId(expr->expression()->id());
+    materialize_true->SetJoinId(expr->MaterializeTrueId());
     set_current_block(materialize_true);
     Push(graph()->GetConstantTrue());
   } else {
@@ -5284,7 +5579,7 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == Variable::CONST)  {
+    if (var->mode() == CONST) {
       return Bailout("unsupported count operation with const");
     }
     // Argument of the count operation is a variable, not a property.
@@ -5328,7 +5623,9 @@
         HStoreContextSlot* instr =
             new(zone()) HStoreContextSlot(context, var->index(), after);
         AddInstruction(instr);
-        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(expr->AssignmentId());
+        }
         break;
       }
 
@@ -5357,7 +5654,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(expr->CountId());
+      if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
 
       after = BuildIncrement(returns_original_input, expr);
       input = Pop();
@@ -5370,7 +5667,7 @@
       // necessary.
       environment()->SetExpressionStackAt(0, after);
       if (returns_original_input) environment()->SetExpressionStackAt(1, input);
-      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
 
     } else {
       // Keyed property.
@@ -5447,38 +5744,34 @@
         AddInstruction(HCheckInstanceType::NewIsString(right));
         instr = new(zone()) HStringAdd(context, left, right);
       } else {
-        instr = new(zone()) HAdd(context, left, right);
+        instr = HAdd::NewHAdd(zone(), context, left, right);
       }
       break;
     case Token::SUB:
-      instr = new(zone()) HSub(context, left, right);
+      instr = HSub::NewHSub(zone(), context, left, right);
       break;
     case Token::MUL:
-      instr = new(zone()) HMul(context, left, right);
+      instr = HMul::NewHMul(zone(), context, left, right);
       break;
     case Token::MOD:
-      instr = new(zone()) HMod(context, left, right);
+      instr = HMod::NewHMod(zone(), context, left, right);
       break;
     case Token::DIV:
-      instr = new(zone()) HDiv(context, left, right);
+      instr = HDiv::NewHDiv(zone(), context, left, right);
       break;
     case Token::BIT_XOR:
-      instr = new(zone()) HBitXor(context, left, right);
-      break;
     case Token::BIT_AND:
-      instr = new(zone()) HBitAnd(context, left, right);
-      break;
     case Token::BIT_OR:
-      instr = new(zone()) HBitOr(context, left, right);
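+      // BIT_XOR, BIT_AND, and BIT_OR fall through to a single HBitwise
+      // instruction parameterized by the token.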
+      instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
       break;
     case Token::SAR:
-      instr = new(zone()) HSar(context, left, right);
+      instr = HSar::NewHSar(zone(), context, left, right);
       break;
     case Token::SHR:
-      instr = new(zone()) HShr(context, left, right);
+      instr = HShr::NewHShr(zone(), context, left, right);
       break;
     case Token::SHL:
-      instr = new(zone()) HShl(context, left, right);
+      instr = HShl::NewHShl(zone(), context, left, right);
       break;
     default:
       UNREACHABLE();
@@ -5671,26 +5964,66 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
-                                               Expression* expr,
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                               HTypeof* typeof_expr,
                                                Handle<String> check) {
-  CHECK_ALIVE(VisitForTypeOf(expr));
-  HValue* expr_value = Pop();
-  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
-  instr->set_position(compare_expr->position());
-  return ast_context()->ReturnControl(instr, compare_expr->id());
+  // Note: The HTypeof itself is removed during canonicalization, if possible.
+  HValue* value = typeof_expr->value();
+  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
+  instr->set_position(expr->position());
+  return ast_context()->ReturnControl(instr, expr->id());
 }
 
 
-void HGraphBuilder::HandleLiteralCompareUndefined(
-    CompareOperation* compare_expr, Expression* expr) {
-  CHECK_ALIVE(VisitForValue(expr));
-  HValue* lhs = Pop();
-  HValue* rhs = graph()->GetConstantUndefined();
-  HCompareObjectEqAndBranch* instr =
-      new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
-  instr->set_position(compare_expr->position());
-  return ast_context()->ReturnControl(instr, compare_expr->id());
+static bool MatchLiteralCompareNil(HValue* left,
+                                   Token::Value op,
+                                   HValue* right,
+                                   Handle<Object> nil,
+                                   HValue** expr) {
+  if (left->IsConstant() &&
+      HConstant::cast(left)->handle().is_identical_to(nil) &&
+      Token::IsEqualityOp(op)) {
+    *expr = right;
+    return true;
+  }
+  return false;
+}
+
+
+static bool MatchLiteralCompareTypeof(HValue* left,
+                                      Token::Value op,
+                                      HValue* right,
+                                      HTypeof** typeof_expr,
+                                      Handle<String>* check) {
+  if (left->IsTypeof() &&
+      Token::IsEqualityOp(op) &&
+      right->IsConstant() &&
+      HConstant::cast(right)->HasStringValue()) {
+    *typeof_expr = HTypeof::cast(left);
+    *check = Handle<String>::cast(HConstant::cast(right)->handle());
+    return true;
+  }
+  return false;
+}
+
+
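+// Each matcher is tried with both operand orders below, so both
+// "typeof x == 'string'" and "'string' == typeof x" are recognized
+// (and likewise "x == null" and "null == x").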
+static bool IsLiteralCompareTypeof(HValue* left,
+                                   Token::Value op,
+                                   HValue* right,
+                                   HTypeof** typeof_expr,
+                                   Handle<String>* check) {
+  return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
+      MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
+}
+
+
+static bool IsLiteralCompareNil(HValue* left,
+                                Token::Value op,
+                                HValue* right,
+                                Handle<Object> nil,
+                                HValue** expr) {
+  return MatchLiteralCompareNil(left, op, right, nil, expr) ||
+      MatchLiteralCompareNil(right, op, left, nil, expr);
 }
 
 
@@ -5711,21 +6044,9 @@
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
-  // Check for special cases that compare against literals.
-  Expression *sub_expr;
-  Handle<String> check;
-  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
-    HandleLiteralCompareTypeof(expr, sub_expr, check);
-    return;
-  }
-
-  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
-    HandleLiteralCompareUndefined(expr, sub_expr);
-    return;
-  }
-
   TypeInfo type_info = oracle()->CompareType(expr);
   // Check if this expression was ever executed according to type feedback.
+  // Note that for the special typeof/null/undefined cases we get unknown here.
   if (type_info.IsUninitialized()) {
     AddInstruction(new(zone()) HSoftDeoptimize);
     current_block()->MarkAsDeoptimizing();
@@ -5740,6 +6061,20 @@
   HValue* left = Pop();
   Token::Value op = expr->op();
 
+  HTypeof* typeof_expr = NULL;
+  Handle<String> check;
+  if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
+    return HandleLiteralCompareTypeof(expr, typeof_expr, check);
+  }
+  HValue* sub_expr = NULL;
+  Factory* f = graph()->isolate()->factory();
+  if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
+    return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+  }
+  if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
+    return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+  }
+
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not
     // residing in new space. If it is we assume that the function will stay the
@@ -5752,7 +6087,7 @@
         !info()->global_object()->IsAccessCheckNeeded()) {
       Handle<String> name = proxy->name();
       Handle<GlobalObject> global(info()->global_object());
-      LookupResult lookup;
+      LookupResult lookup(isolate());
       global->Lookup(*name, &lookup);
       if (lookup.IsProperty() &&
           lookup.type() == NORMAL &&
@@ -5827,14 +6162,16 @@
 }
 
 
-void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
+void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                            HValue* value,
+                                            NilValue nil) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  CHECK_ALIVE(VisitForValue(expr->expression()));
-  HValue* value = Pop();
-  HIsNullAndBranch* instr =
-      new(zone()) HIsNullAndBranch(value, expr->is_strict());
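+  // In JavaScript, "x == null" also matches undefined (and vice versa),
+  // while "===" matches only the exact nil value, so the equality kind is
+  // passed along to the branch instruction.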
+  EqualityKind kind =
+      expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
+  HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
+  instr->set_position(expr->position());
   return ast_context()->ReturnControl(instr, expr->id());
 }
 
@@ -5843,7 +6180,8 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HThisFunction* self = new(zone()) HThisFunction;
+  HThisFunction* self = new(zone()) HThisFunction(
+      function_state()->compilation_info()->closure());
   return ast_context()->ReturnInstruction(self, expr->id());
 }
 
@@ -5854,9 +6192,11 @@
 
 
 void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
-                                      Variable::Mode mode,
+                                      VariableMode mode,
                                       FunctionLiteral* function) {
-  if (mode == Variable::LET) return Bailout("unsupported let declaration");
+  if (mode == LET || mode == CONST_HARMONY) {
+    return Bailout("unsupported harmony declaration");
+  }
   Variable* var = proxy->var();
   switch (var->location()) {
     case Variable::UNALLOCATED:
@@ -5864,9 +6204,9 @@
     case Variable::PARAMETER:
     case Variable::LOCAL:
     case Variable::CONTEXT:
-      if (mode == Variable::CONST || function != NULL) {
+      if (mode == CONST || function != NULL) {
         HValue* value = NULL;
-        if (mode == Variable::CONST) {
+        if (mode == CONST) {
           value = graph()->GetConstantHole();
         } else {
           VisitForValue(function);
@@ -5877,7 +6217,7 @@
           HStoreContextSlot* store =
               new HStoreContextSlot(context, var->index(), value);
           AddInstruction(store);
-          if (store->HasSideEffects()) AddSimulate(proxy->id());
+          if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
         } else {
           environment()->Bind(var, value);
         }
@@ -5917,9 +6257,7 @@
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
-      new(zone()) HHasInstanceTypeAndBranch(value,
-                                            JS_FUNCTION_TYPE,
-                                            JS_FUNCTION_PROXY_TYPE);
+      new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
   return ast_context()->ReturnControl(result, call->id());
 }
 
@@ -6047,7 +6385,44 @@
 
 
 void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  return Bailout("inlined runtime function: SetValueOf");
+  ASSERT(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* value = Pop();
+  HValue* object = Pop();
+  // Check that the object is not a smi.
+  HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
+  HBasicBlock* if_smi = graph()->CreateBasicBlock();
+  HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
+  HBasicBlock* join = graph()->CreateBasicBlock();
+  smicheck->SetSuccessorAt(0, if_smi);
+  smicheck->SetSuccessorAt(1, if_heap_object);
+  current_block()->Finish(smicheck);
+  if_smi->Goto(join);
+
+  // Check if object is a JSValue.
+  set_current_block(if_heap_object);
+  HHasInstanceTypeAndBranch* typecheck =
+      new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
+  HBasicBlock* if_js_value = graph()->CreateBasicBlock();
+  HBasicBlock* not_js_value = graph()->CreateBasicBlock();
+  typecheck->SetSuccessorAt(0, if_js_value);
+  typecheck->SetSuccessorAt(1, not_js_value);
+  current_block()->Finish(typecheck);
+  not_js_value->Goto(join);
+
+  // Create in-object property store to kValueOffset.
+  set_current_block(if_js_value);
+  Handle<String> name = isolate()->factory()->undefined_symbol();
+  AddInstruction(new HStoreNamedField(object,
+                                      name,
+                                      value,
+                                      true,  // in-object store.
+                                      JSValue::kValueOffset));
+  if_js_value->Goto(join);
+  join->SetJoinId(call->id());
+  set_current_block(join);
+  return ast_context()->ReturnValue(value);
 }
 
 
@@ -6210,12 +6585,37 @@
     CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
   }
   CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+
   HValue* function = Pop();
   HValue* context = environment()->LookupContext();
-  HInvokeFunction* result =
-      new(zone()) HInvokeFunction(context, function, arg_count);
+
+  // Branch to handle function proxies and other non-functions.
+  HHasInstanceTypeAndBranch* typecheck =
+      new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+  HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
+  HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
+  HBasicBlock* join = graph()->CreateBasicBlock();
+  typecheck->SetSuccessorAt(0, if_jsfunction);
+  typecheck->SetSuccessorAt(1, if_nonfunction);
+  current_block()->Finish(typecheck);
+
+  set_current_block(if_jsfunction);
+  HInstruction* invoke_result = AddInstruction(
+      new(zone()) HInvokeFunction(context, function, arg_count));
   Drop(arg_count);
-  return ast_context()->ReturnInstruction(result, call->id());
+  Push(invoke_result);
+  if_jsfunction->Goto(join);
+
+  set_current_block(if_nonfunction);
+  HInstruction* call_result = AddInstruction(
+      new(zone()) HCallFunction(context, function, arg_count));
+  Drop(arg_count);
+  Push(call_result);
+  if_nonfunction->Goto(join);
+
+  set_current_block(join);
+  join->SetJoinId(call->id());
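+  // Each arm pushed its result; merging the environments at the join turns
+  // the two values into a phi, which Pop() returns.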
+  return ast_context()->ReturnValue(Pop());
 }
 
 
@@ -6255,6 +6655,18 @@
 }
 
 
+void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
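+  // This mirrors GenerateMathLog below: both go through the shared
+  // TranscendentalCache stub and differ only in the transcendental type.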
+  ASSERT_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+  result->set_transcendental_type(TranscendentalCache::TAN);
+  Drop(1);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
 void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
@@ -6472,7 +6884,7 @@
   // If the function we are inlining is a strict mode function or a
   // builtin function, pass undefined as the receiver for function
   // calls (instead of the global receiver).
-  if ((target->shared()->native() || function->strict_mode()) &&
+  if ((target->shared()->native() || !function->is_classic_mode()) &&
       call_kind == CALL_AS_FUNCTION) {
     inner->SetValueAt(0, undefined);
   }
@@ -6819,7 +7231,7 @@
   }
 
 #ifdef DEBUG
-  if (graph_ != NULL) graph_->Verify();
+  if (graph_ != NULL) graph_->Verify(false);  // No full verify.
   if (allocator_ != NULL) allocator_->Verify();
 #endif
 }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 03fbc73..ded1356 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -121,7 +121,7 @@
 
   void Finish(HControlInstruction* last);
   void FinishExit(HControlInstruction* instruction);
-  void Goto(HBasicBlock* block);
+  void Goto(HBasicBlock* block, bool drop_extra = false);
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
   void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@@ -133,7 +133,9 @@
 
   // Add the inlined function exit sequence, adding an HLeaveInlined
   // instruction and updating the bailout environment.
-  void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
+  void AddLeaveInlined(HValue* return_value,
+                       HBasicBlock* target,
+                       bool drop_extra = false);
 
   // If a target block is tagged as an inline function return, all
   // predecessors should contain the inlined exit sequence:
@@ -243,11 +245,13 @@
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
-  bool CheckPhis();
+  bool CheckArgumentsPhiUses();
 
-  // Returns false if there are phi-uses of hole values comming
-  // from uninitialized consts.
-  bool CollectPhis();
+  // Returns false if there are phi-uses of an uninitialized const
+  // which are not supported by the optimizing compiler.
+  bool CheckConstPhiUses();
+
+  void CollectPhis();
 
   Handle<Code> Compile(CompilationInfo* info);
 
@@ -283,7 +287,7 @@
   }
 
 #ifdef DEBUG
-  void Verify() const;
+  void Verify(bool do_full_verify) const;
 #endif
 
  private:
@@ -601,16 +605,18 @@
 };
 
 
-class FunctionState BASE_EMBEDDED {
+class FunctionState {
  public:
   FunctionState(HGraphBuilder* owner,
                 CompilationInfo* info,
-                TypeFeedbackOracle* oracle);
+                TypeFeedbackOracle* oracle,
+                bool drop_extra);
   ~FunctionState();
 
   CompilationInfo* compilation_info() { return compilation_info_; }
   TypeFeedbackOracle* oracle() { return oracle_; }
   AstContext* call_context() { return call_context_; }
+  bool drop_extra() { return drop_extra_; }
   HBasicBlock* function_return() { return function_return_; }
   TestContext* test_context() { return test_context_; }
   void ClearInlinedTestContext() {
@@ -630,6 +636,10 @@
   // inlined. NULL when not inlining.
   AstContext* call_context_;
 
+  // Indicates whether we have to drop an extra value from the environment
+  // on return from inlined functions.
+  bool drop_extra_;
+
   // When inlining in an effect or value context, this is the return block.
   // It is NULL otherwise.  When inlining in a test context, there is a
   // pair of return blocks in the context.  When not inlining, there is no
@@ -647,6 +657,7 @@
 class HGraphBuilder: public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
+  enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
 
   // A class encapsulating (lazily-allocated) break and continue blocks for
   // a breakable statement.  Separated from BreakAndContinueScope so that it
@@ -726,6 +737,8 @@
 
   TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
 
+  FunctionState* function_state() const { return function_state_; }
+
  private:
   // Type of a member function that generates inline code for a native function.
   typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -744,7 +757,6 @@
   static const int kMaxSourceSize = 600;
 
   // Simple accessors.
-  FunctionState* function_state() const { return function_state_; }
   void set_function_state(FunctionState* state) { function_state_ = state; }
 
   AstContext* ast_context() const { return ast_context_; }
@@ -767,8 +779,9 @@
   void ClearInlinedTestContext() {
     function_state()->ClearInlinedTestContext();
   }
-  bool function_strict_mode() {
-    return function_state()->compilation_info()->is_strict_mode();
+  StrictModeFlag function_strict_mode_flag() {
+    return function_state()->compilation_info()->is_classic_mode()
+        ? kNonStrictMode : kStrictMode;
   }
 
   // Generators for inline runtime functions.
@@ -780,7 +793,7 @@
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
   void HandleDeclaration(VariableProxy* proxy,
-                         Variable::Mode mode,
+                         VariableMode mode,
                          FunctionLiteral* function);
 
   void VisitDelete(UnaryOperation* expr);
@@ -881,7 +894,7 @@
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
-  bool TryInline(Call* expr);
+  bool TryInline(Call* expr, bool drop_extra = false);
   bool TryInlineBuiltinFunction(Call* expr,
                                 HValue* receiver,
                                 Handle<Map> receiver_map,
@@ -910,11 +923,12 @@
                                   HValue* receiver,
                                   SmallMapList* types,
                                   Handle<String> name);
-  void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
-                                  Expression* expr,
+  void HandleLiteralCompareTypeof(CompareOperation* expr,
+                                  HTypeof* typeof_expr,
                                   Handle<String> check);
-  void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
-                                     Expression* expr);
+  void HandleLiteralCompareNil(CompareOperation* expr,
+                               HValue* value,
+                               NilValue nil);
 
   HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
                                            HValue* string,
@@ -938,11 +952,16 @@
       HValue* val,
       ElementsKind elements_kind,
       bool is_store);
+  HInstruction* BuildFastElementAccess(HValue* elements,
+                                       HValue* checked_key,
+                                       HValue* val,
+                                       ElementsKind elements_kind,
+                                       bool is_store);
 
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
-                                              Expression* expr,
+                                              Handle<Map> map,
                                               bool is_store);
   HValue* HandlePolymorphicElementAccess(HValue* object,
                                          HValue* key,
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 0ca2d6b..5f67077 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -78,7 +78,9 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
 }
 
@@ -88,9 +90,14 @@
 }
 
 
-void RelocInfo::set_target_address(Address target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   Assembler::set_target_address_at(pc_, target);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
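+  // Tell the incremental marker about the embedded code reference so that a
+  // write performed while marking is in progress is not missed.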
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -112,10 +119,16 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -142,11 +155,18 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because the cell can never
+    // be on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -161,6 +181,11 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_ + 1, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -194,14 +219,14 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
@@ -222,14 +247,14 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 9996474..322ba44 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been modified
 // significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -55,6 +55,8 @@
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
+// Allocation failure is silent and leads to a safe default.
 void CpuFeatures::Probe() {
   ASSERT(!initialized_);
   ASSERT(supported_ == 0);
@@ -86,23 +88,23 @@
   __ pushfd();
   __ push(ecx);
   __ push(ebx);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
   __ pushfd();
   __ pop(eax);
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ xor_(eax, 0x200000);  // Flip bit 21.
   __ push(eax);
   __ popfd();
   __ pushfd();
   __ pop(eax);
-  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ xor_(eax, edx);  // Different if CPUID is supported.
   __ j(not_zero, &cpuid);
 
   // CPUID not supported. Clear the supported features in edx:eax.
-  __ xor_(eax, Operand(eax));
-  __ xor_(edx, Operand(edx));
+  __ xor_(eax, eax);
+  __ xor_(edx, edx);
   __ jmp(&done);
 
   // Invoke CPUID with 1 in eax to get feature information in
@@ -118,13 +120,13 @@
 
   // Move the result from ecx:edx to edx:eax and make sure to mark the
   // CPUID feature as supported.
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
   __ or_(eax, 1 << CPUID);
-  __ mov(edx, Operand(ecx));
+  __ mov(edx, ecx);
 
   // Done.
   __ bind(&done);
-  __ mov(esp, Operand(ebp));
+  __ mov(esp, ebp);
   __ pop(ebx);
   __ pop(ecx);
   __ popfd();
@@ -286,6 +288,18 @@
       && ((buf_[0] & 0x07) == reg.code());  // register codes match.
 }
 
+
+bool Operand::is_reg_only() const {
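+  // In the ModR/M byte, mod == 0b11 (the top two bits) selects the
+  // register-direct addressing mode; the low three bits hold the register.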
+  return (buf_[0] & 0xF8) == 0xC0;  // Addressing mode is register only.
+}
+
+
+Register Operand::reg() const {
+  ASSERT(is_reg_only());
+  return Register::from_code(buf_[0] & 0x07);
+}
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.
 
@@ -485,7 +499,7 @@
 
 
 void Assembler::mov_b(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x8A);
   emit_operand(dst, src);
@@ -501,7 +515,7 @@
 
 
 void Assembler::mov_b(const Operand& dst, Register src) {
-  ASSERT(src.code() < 4);
+  CHECK(src.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x88);
   emit_operand(src, dst);
@@ -614,26 +628,6 @@
 }
 
 
-void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
-  ASSERT(CpuFeatures::IsEnabled(CMOV));
-  EnsureSpace ensure_space(this);
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(imm32);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
-  ASSERT(CpuFeatures::IsEnabled(CMOV));
-  EnsureSpace ensure_space(this);
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(handle);
-}
-
-
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
@@ -701,6 +695,13 @@
 }
 
 
+void Assembler::add(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x01);
+  emit_operand(src, dst);
+}
+
+
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -741,25 +742,29 @@
 
 void Assembler::cmpb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  EMIT(0x80);
-  emit_operand(edi, op);  // edi == 7
+  if (op.is_reg(eax)) {
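+    // "cmp al, imm8" has a dedicated short encoding (opcode 0x3C).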
+    EMIT(0x3C);
+  } else {
+    EMIT(0x80);
+    emit_operand(edi, op);  // edi == 7
+  }
   EMIT(imm8);
 }
 
 
-void Assembler::cmpb(const Operand& dst, Register src) {
-  ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
-  emit_operand(src, dst);
+  emit_operand(reg, op);
 }
 
 
-void Assembler::cmpb(Register dst, const Operand& src) {
-  ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
-  emit_operand(dst, src);
+  emit_operand(reg, op);
 }
 
 
@@ -820,6 +825,7 @@
 
 
 void Assembler::dec_b(Register dst) {
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
@@ -1069,18 +1075,6 @@
 }
 
 
-void Assembler::subb(const Operand& op, int8_t imm8) {
-  EnsureSpace ensure_space(this);
-  if (op.is_reg(eax)) {
-    EMIT(0x2c);
-  } else {
-    EMIT(0x80);
-    emit_operand(ebp, op);  // ebp == 5
-  }
-  EMIT(imm8);
-}
-
-
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
   emit_arith(5, dst, x);
@@ -1094,14 +1088,6 @@
 }
 
 
-void Assembler::subb(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
-  EnsureSpace ensure_space(this);
-  EMIT(0x2A);
-  emit_operand(dst, src);
-}
-
-
 void Assembler::sub(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x29);
@@ -1113,7 +1099,9 @@
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
-  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
+  if (imm.rmode_ == RelocInfo::NONE &&
+      is_uint8(imm.x_) &&
+      reg.is_byte_register()) {
     uint8_t imm8 = imm.x_;
     if (reg.is(eax)) {
       EMIT(0xA8);
@@ -1143,6 +1131,7 @@
 
 
 void Assembler::test_b(Register reg, const Operand& op) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x84);
   emit_operand(reg, op);
@@ -1158,6 +1147,10 @@
 
 
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
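+  // Only eax, ebx, ecx, and edx have byte variants; for other registers,
+  // fall back to a 32-bit test, which sets ZF identically for an 8-bit mask.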
+  if (op.is_reg_only() && !op.reg().is_byte_register()) {
+    test(op, Immediate(imm8));
+    return;
+  }
   EnsureSpace ensure_space(this);
   EMIT(0xF6);
   emit_operand(eax, op);
@@ -1178,10 +1171,10 @@
 }
 
 
-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x31);
-  emit_operand(dst, src);
+  emit_operand(src, dst);
 }
 
 
@@ -1637,6 +1630,13 @@
 }
 
 
+void Assembler::fptan() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xF2);
+}
+
+
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
   EMIT(0xD9);
@@ -2471,7 +2471,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4698e3e..d798f81 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -75,6 +75,8 @@
   static inline Register FromAllocationIndex(int index);
 
   static Register from_code(int code) {
+    ASSERT(code >= 0);
+    ASSERT(code < kNumRegisters);
     Register r = { code };
     return r;
   }
@@ -300,9 +302,6 @@
 
 class Operand BASE_EMBEDDED {
  public:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
 
@@ -347,12 +346,16 @@
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
+  // Returns true if this Operand is a wrapper for one register.
+  bool is_reg_only() const;
+
+  // Asserts that this Operand is a wrapper for one register and returns the
+  // register.
+  Register reg() const;
+
  private:
-  byte buf_[6];
-  // The number of bytes in buf_.
-  unsigned int len_;
-  // Only valid if len_ > 4.
-  RelocInfo::Mode rmode_;
+  // reg
+  INLINE(explicit Operand(Register reg));
 
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -362,7 +365,15 @@
   inline void set_disp8(int8_t disp);
   inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
 
+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
   friend class Assembler;
+  friend class MacroAssembler;
+  friend class LCodeGen;
 };
 
 
@@ -671,7 +682,9 @@
   void leave();
 
   // Moves
+  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
+  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
   void mov_b(const Operand& dst, int8_t imm8);
   void mov_b(const Operand& dst, Register src);
 
@@ -687,17 +700,22 @@
   void mov(const Operand& dst, Handle<Object> handle);
   void mov(const Operand& dst, Register src);
 
+  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
   void movsx_b(Register dst, const Operand& src);
 
+  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
   void movsx_w(Register dst, const Operand& src);
 
+  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
   void movzx_b(Register dst, const Operand& src);
 
+  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
   void movzx_w(Register dst, const Operand& src);
 
   // Conditional moves
-  void cmov(Condition cc, Register dst, int32_t imm32);
-  void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, Register src) {
+    cmov(cc, dst, Operand(src));
+  }
   void cmov(Condition cc, Register dst, const Operand& src);
 
   // Flag management.
@@ -715,24 +733,31 @@
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
 
+  void add(Register dst, Register src) { add(dst, Operand(src)); }
   void add(Register dst, const Operand& src);
+  void add(const Operand& dst, Register src);
+  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
   void add(const Operand& dst, const Immediate& x);
 
   void and_(Register dst, int32_t imm32);
   void and_(Register dst, const Immediate& x);
+  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
   void and_(Register dst, const Operand& src);
-  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);
 
+  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
   void cmpb(const Operand& op, int8_t imm8);
-  void cmpb(Register src, const Operand& dst);
-  void cmpb(const Operand& dst, Register src);
+  void cmpb(Register reg, const Operand& op);
+  void cmpb(const Operand& op, Register reg);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
+  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
 
@@ -748,6 +773,7 @@
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
 
@@ -764,8 +790,10 @@
   void not_(Register dst);
 
   void or_(Register dst, int32_t imm32);
+  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
   void or_(Register dst, const Operand& src);
   void or_(const Operand& dst, Register src);
+  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
@@ -776,35 +804,42 @@
 
   void sbb(Register dst, const Operand& src);
 
+  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
   void shl(Register dst, uint8_t imm8);
   void shl_cl(Register dst);
 
+  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
   void shr(Register dst, uint8_t imm8);
   void shr_cl(Register dst);
 
-  void subb(const Operand& dst, int8_t imm8);
-  void subb(Register dst, const Operand& src);
+  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);
 
   void test(Register reg, const Immediate& imm);
+  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
   void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
+  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
   void test_b(const Operand& op, uint8_t imm8);
 
   void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
   void xor_(Register dst, const Operand& src);
-  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, Register src);
+  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
   void xor_(const Operand& dst, const Immediate& x);
 
   // Bit operations.
   void bt(const Operand& dst, Register src);
+  void bts(Register dst, Register src) { bts(Operand(dst), src); }
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
@@ -835,6 +870,7 @@
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
   int CallSize(const Operand& adr);
+  void call(Register reg) { call(Operand(reg)); }
   void call(const Operand& adr);
   int CallSize(Handle<Code> code, RelocInfo::Mode mode);
   void call(Handle<Code> code,
@@ -845,6 +881,7 @@
   // unconditional jump to L
   void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(Register reg) { jmp(Operand(reg)); }
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
@@ -887,6 +924,7 @@
   void fchs();
   void fcos();
   void fsin();
+  void fptan();
   void fyl2x();
 
   void fadd(int i);
@@ -929,6 +967,7 @@
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
 
+  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -969,12 +1008,14 @@
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);
 
+  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
-  void movd(const Operand& src, XMMRegister dst);
+  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+  void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);
 
   void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& src, XMMRegister dst);
+  void movss(const Operand& dst, XMMRegister src);
   void movss(XMMRegister dst, XMMRegister src);
 
   void pand(XMMRegister dst, XMMRegister src);
@@ -987,11 +1028,17 @@
   void psrlq(XMMRegister reg, int8_t shift);
   void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(Register dst, XMMRegister src, int8_t offset) {
+    pextrd(Operand(dst), src, offset);
+  }
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+    pinsrd(dst, Operand(src), offset);
+  }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
 
   // Parallel XMM operations.
-  void movntdqa(XMMRegister src, const Operand& dst);
+  void movntdqa(XMMRegister dst, const Operand& src);
   void movntdq(const Operand& dst, XMMRegister src);
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@@ -1045,6 +1092,9 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
@@ -1057,9 +1107,8 @@
 
   byte* addr_at(int pos) { return buffer_ + pos; }
 
+
  private:
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 310ea3d..e12e79a 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -69,7 +69,7 @@
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ add(Operand(eax), Immediate(num_extra_args + 1));
+  __ add(eax, Immediate(num_extra_args + 1));
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
@@ -80,25 +80,34 @@
   //  -- edi: constructor function
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that function is not a smi.
   __ JumpIfSmi(edi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &non_function_call);
+  __ j(not_equal, &slow);
 
   // Jump to the function-specific construct stub.
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
   __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(Operand(ebx));
+  __ jmp(ebx);
 
   // edi: called object
   // eax: number of arguments
+  // ecx: object map
+  Label do_call;
+  __ bind(&slow);
+  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function_call);
+  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // Set expected number of arguments to zero (not changing eax).
   __ Set(ebx, Immediate(0));
-  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   Handle<Code> arguments_adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
   __ SetCallKind(ecx, CALL_AS_METHOD);
@@ -113,265 +122,272 @@
   ASSERT(!is_api_function || !count_constructions);
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Store a smi-tagged arguments count on the stack.
-  __ SmiTag(eax);
-  __ push(eax);
+    // Store a smi-tagged arguments count on the stack.
+    __ SmiTag(eax);
+    __ push(eax);
 
-  // Push the function to invoke on the stack.
-  __ push(edi);
+    // Push the function to invoke on the stack.
+    __ push(edi);
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
-    __ j(not_equal, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(masm->isolate());
+      __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+      __ j(not_equal, &rt_call);
 #endif
 
-    // Verified that the constructor is a JSFunction.
-    // Load the initial map and verify that it is in fact a map.
-    // edi: constructor
-    __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi
-    __ JumpIfSmi(eax, &rt_call);
-    // edi: constructor
-    // eax: initial map (if proven valid below)
-    __ CmpObjectType(eax, MAP_TYPE, ebx);
-    __ j(not_equal, &rt_call);
+      // Verified that the constructor is a JSFunction.
+      // Load the initial map and verify that it is in fact a map.
+      // edi: constructor
+      __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+      // A smi check catches both NULL and actual Smis.
+      __ JumpIfSmi(eax, &rt_call);
+      // edi: constructor
+      // eax: initial map (if proven valid below)
+      __ CmpObjectType(eax, MAP_TYPE, ebx);
+      __ j(not_equal, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // edi: constructor
-    // eax: initial map
-    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-    __ j(equal, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // edi: constructor
+      // eax: initial map
+      __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+      __ j(equal, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-      __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
-      __ j(not_zero, &allocate);
-
-      __ push(eax);
-      __ push(edi);
-
-      __ push(edi);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(edi);
-      __ pop(eax);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // edi: constructor
-    // eax: initial map
-    __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-    __ shl(edi, kPointerSizeLog2);
-    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-    // Allocated the JSObject, now initialize the fields.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-    Factory* factory = masm->isolate()->factory();
-    __ mov(ecx, factory->empty_fixed_array());
-    __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-    __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-    // Set extra fields in the newly allocated object.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    { Label loop, entry;
-      // To allow for truncation.
       if (count_constructions) {
-        __ mov(edx, factory->one_pointer_filler_map());
-      } else {
-        __ mov(edx, factory->undefined_value());
+        Label allocate;
+        // Decrease generous allocation count.
+        __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+        __ dec_b(FieldOperand(ecx,
+                              SharedFunctionInfo::kConstructionCountOffset));
+        __ j(not_zero, &allocate);
+
+        __ push(eax);
+        __ push(edi);
+
+        __ push(edi);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(edi);
+        __ pop(eax);
+
+        __ bind(&allocate);
       }
+
+      // Now allocate the JSObject on the heap.
+      // edi: constructor
+      // eax: initial map
+      __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+      __ shl(edi, kPointerSizeLog2);
+      __ AllocateInNewSpace(
+          edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+      // Allocated the JSObject, now initialize the fields.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+      Factory* factory = masm->isolate()->factory();
+      __ mov(ecx, factory->empty_fixed_array());
+      __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+      __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+      // Set extra fields in the newly allocated object.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
       __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ mov(Operand(ecx, 0), edx);
-      __ add(Operand(ecx), Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmp(ecx, Operand(edi));
-      __ j(less, &loop);
-    }
-
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    __ or_(Operand(ebx), Immediate(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed.
-    // Allocate and initialize a FixedArray if it is.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    // Calculate the total number of properties described by the map.
-    __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-    __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-    __ add(edx, Operand(ecx));
-    // Calculate unused properties past the end of the in-object properties.
-    __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
-    __ sub(edx, Operand(ecx));
-    // Done if no extra properties are to be allocated.
-    __ j(zero, &allocated);
-    __ Assert(positive, "Property allocation count failed.");
-
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // ebx: JSObject
-    // edi: start of next object (will be start of FixedArray)
-    // edx: number of elements in properties array
-    __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          edx,
-                          edi,
-                          ecx,
-                          no_reg,
-                          &undo_allocation,
-                          RESULT_CONTAINS_TOP);
-
-    // Initialize the FixedArray.
-    // ebx: JSObject
-    // edi: FixedArray
-    // edx: number of elements
-    // ecx: start of next object
-    __ mov(eax, factory->fixed_array_map());
-    __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
-    __ SmiTag(edx);
-    __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
-
-    // Initialize the fields to undefined.
-    // ebx: JSObject
-    // edi: FixedArray
-    // ecx: start of next object
-    { Label loop, entry;
       __ mov(edx, factory->undefined_value());
-      __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ mov(Operand(eax, 0), edx);
-      __ add(Operand(eax), Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmp(eax, Operand(ecx));
-      __ j(below, &loop);
+      if (count_constructions) {
+        __ movzx_b(esi,
+                   FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+        __ lea(esi,
+               Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
+        // esi: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmp(esi, edi);
+          __ Assert(less_equal,
+                    "Unexpected number of pre-allocated property fields.");
+        }
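+        // Pre-allocated fields get undefined; the remaining slack is filled
+        // with the one-pointer filler so it can be reclaimed when the
+        // instance size is finalized (see kFinalizeInstanceSize above).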
+        __ InitializeFieldsWithFiller(ecx, esi, edx);
+        __ mov(edx, factory->one_pointer_filler_map());
+      }
+      __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      __ or_(ebx, Immediate(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed.
+      // Allocate and initialize a FixedArray if it is.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      // Calculate the total number of properties described by the map.
+      __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+      __ movzx_b(ecx,
+                 FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+      __ add(edx, ecx);
+      // Calculate unused properties past the end of the in-object properties.
+      __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+      __ sub(edx, ecx);
+      // Done if no extra properties are to be allocated.
+      __ j(zero, &allocated);
+      __ Assert(positive, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // ebx: JSObject
+      // edi: start of next object (will be start of FixedArray)
+      // edx: number of elements in properties array
+      __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                            times_pointer_size,
+                            edx,
+                            edi,
+                            ecx,
+                            no_reg,
+                            &undo_allocation,
+                            RESULT_CONTAINS_TOP);
+
+      // Initialize the FixedArray.
+      // ebx: JSObject
+      // edi: FixedArray
+      // edx: number of elements
+      // ecx: start of next object
+      __ mov(eax, factory->fixed_array_map());
+      __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
+      __ SmiTag(edx);
+      __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
+
+      // Initialize the fields to undefined.
+      // ebx: JSObject
+      // edi: FixedArray
+      // ecx: start of next object
+      { Label loop, entry;
+        __ mov(edx, factory->undefined_value());
+        __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ mov(Operand(eax, 0), edx);
+        __ add(eax, Immediate(kPointerSize));
+        __ bind(&entry);
+        __ cmp(eax, ecx);
+        __ j(below, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // ebx: JSObject
+      // edi: FixedArray
+      __ or_(edi, Immediate(kHeapObjectTag));  // add the heap tag
+      __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+      // Continue with JSObject being successfully allocated
+      // ebx: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // ebx: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(ebx);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // ebx: JSObject
-    // edi: FixedArray
-    __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
-    __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+    // Allocate the new receiver object using the runtime call.
+    __ bind(&rt_call);
+    // Must restore edi (constructor) before calling runtime.
+    __ mov(edi, Operand(esp, 0));
+    // edi: function (constructor)
+    __ push(edi);
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(ebx, eax);  // store result in ebx
 
+    // New object allocated.
+    // ebx: newly allocated object
+    __ bind(&allocated);
+    // Retrieve the function from the stack.
+    __ pop(edi);
 
-    // Continue with JSObject being successfully allocated
-    // ebx: JSObject
-    __ jmp(&allocated);
+    // Retrieve smi-tagged arguments count from the stack.
+    __ mov(eax, Operand(esp, 0));
+    __ SmiUntag(eax);
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // ebx: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(ebx);
+    // Push the allocated receiver to the stack. We need two copies
+    // because we may have to return the original one and the calling
+    // conventions dictate that the called function pops the receiver.
+    __ push(ebx);
+    __ push(ebx);
+
+    // Set up the pointer to the last argument.
+    __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ mov(ecx, eax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(ebx, ecx, times_4, 0));
+    __ bind(&entry);
+    __ dec(ecx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    if (is_api_function) {
+      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Restore context from the frame.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    __ JumpIfSmi(eax, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ mov(eax, Operand(esp, 0));
+
+    // Restore the arguments count and leave the construct frame.
+    __ bind(&exit);
+    __ mov(ebx, Operand(esp, kPointerSize));  // Get arguments count.
+
+    // Leave construct frame.
   }
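
For context on the branch structure just above: the stub implements the
[[Construct]] result rule of ECMA-262 section 13.2.2 cited in the comments,
under which a constructor's return value only replaces the freshly allocated
receiver when that value is an object. A minimal C++ sketch of the same
decision; the struct, field names, and threshold constant are illustrative
stand-ins, not V8's real types:

    #include <cstdint>

    struct Value {
      bool is_smi;            // small integers are never objects
      uint8_t instance_type;  // ordered so that spec objects sort highest
    };

    const uint8_t kFirstSpecObjectType = 0x80;  // illustrative threshold

    // Mirrors JumpIfSmi + CmpObjectType above: keep the callee's result
    // only if it is an object in the ECMA sense, else keep the receiver.
    const Value* ConstructResult(const Value* result, const Value* receiver) {
      if (result->is_smi) return receiver;
      if (result->instance_type < kFirstSpecObjectType) return receiver;
      return result;
    }
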
 
-  // Allocate the new receiver object using the runtime call.
-  __ bind(&rt_call);
-  // Must restore edi (constructor) before calling runtime.
-  __ mov(edi, Operand(esp, 0));
-  // edi: function (constructor)
-  __ push(edi);
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(ebx, Operand(eax));  // store result in ebx
-
-  // New object allocated.
-  // ebx: newly allocated object
-  __ bind(&allocated);
-  // Retrieve the function from the stack.
-  __ pop(edi);
-
-  // Retrieve smi-tagged arguments count from the stack.
-  __ mov(eax, Operand(esp, 0));
-  __ SmiUntag(eax);
-
-  // Push the allocated receiver to the stack. We need two copies
-  // because we may have to return the original one and the calling
-  // conventions dictate that the called function pops the receiver.
-  __ push(ebx);
-  __ push(ebx);
-
-  // Setup pointer to last argument.
-  __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
-  // Copy arguments and receiver to the expression stack.
-  Label loop, entry;
-  __ mov(ecx, Operand(eax));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ push(Operand(ebx, ecx, times_4, 0));
-  __ bind(&entry);
-  __ dec(ecx);
-  __ j(greater_equal, &loop);
-
-  // Call the function.
-  if (is_api_function) {
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Restore context from the frame.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ JumpIfSmi(eax, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ mov(eax, Operand(esp, 0));
-
-  // Restore the arguments count and leave the construct frame.
-  __ bind(&exit);
-  __ mov(ebx, Operand(esp, kPointerSize));  // get arguments count
-  __ LeaveConstructFrame();
-
   // Remove caller arguments from the stack and return.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
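
The STATIC_ASSERT above pins down the smi layout this tail depends on: with
kSmiTag == 0 and kSmiTagSize == 1, a smi is the integer shifted left one bit,
leaving the low (tag) bit clear. A standalone sketch of that encoding, and of
why a smi-tagged count can index 4-byte stack slots with a times_2 scale:

    #include <cassert>
    #include <cstdint>

    // ia32 smi encoding, assuming kSmiTag == 0 and kSmiTagSize == 1.
    static int32_t SmiTag(int32_t value) { return value << 1; }
    static int32_t SmiUntag(int32_t smi) { return smi >> 1; }

    int main() {
      assert(SmiUntag(SmiTag(-7)) == -7);
      assert((SmiTag(42) & 1) == 0);  // tag bit clear on every smi
      // The tag already multiplied the count by 2, so scaling a smi-tagged
      // count by times_2 yields count * 4: one pointer-sized slot each.
      assert(SmiTag(3) * 2 == 3 * 4);
      return 0;
    }
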
@@ -399,57 +415,58 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
-  // Clear the context before we push it when entering the JS frame.
+  // Clear the context before we push it when entering the internal frame.
   __ Set(esi, Immediate(0));
 
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Load the previous frame pointer (ebx) to access C arguments
-  __ mov(ebx, Operand(ebp, 0));
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
 
-  // Get the function from the frame and setup the context.
-  __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-  __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+    // Get the function from the frame and set up the context.
+    __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+    __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
 
-  // Push the function and the receiver onto the stack.
-  __ push(ecx);
-  __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+    // Push the function and the receiver onto the stack.
+    __ push(ecx);
+    __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
-  __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+    // Load the number of arguments and set up the arguments pointer.
+    __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+    __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
 
-  // Copy arguments to the stack in a loop.
-  Label loop, entry;
-  __ Set(ecx, Immediate(0));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
-  __ push(Operand(edx, 0));  // dereference handle
-  __ inc(Operand(ecx));
-  __ bind(&entry);
-  __ cmp(ecx, Operand(eax));
-  __ j(not_equal, &loop);
+    // Copy arguments to the stack in a loop.
+    Label loop, entry;
+    __ Set(ecx, Immediate(0));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+    __ push(Operand(edx, 0));  // dereference handle
+    __ inc(ecx);
+    __ bind(&entry);
+    __ cmp(ecx, eax);
+    __ j(not_equal, &loop);
 
-  // Get the function from the stack and call it.
-  __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));  // +1 ~ receiver
+    // Get the function from the stack and call it.
+    // kPointerSize for the receiver.
+    __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
 
-  // Invoke the code.
-  if (is_construct) {
-    __ call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
-  } else {
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code.
+    if (is_construct) {
+      __ call(masm->isolate()->builtins()->JSConstructCall(),
+              RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
   }
-
-  // Exit the JS frame. Notice that this also removes the empty
-  // context and the function left on the stack by the code
-  // invocation.
-  __ LeaveInternalFrame();
-  __ ret(1 * kPointerSize);  // remove receiver
+  __ ret(kPointerSize);  // Remove receiver.
 }
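
A pattern worth calling out, since it repeats through the rest of this file:
each paired EnterInternalFrame()/LeaveInternalFrame() becomes a block-scoped
FrameScope. The likely motivation, sketched below with illustrative names
rather than V8's real interfaces, is RAII: frame teardown is tied to scope
exit, so no code path can forget it.

    // Stand-in types; the real FrameScope/MacroAssembler are richer.
    struct Assembler {
      int frame_depth = 0;
      void EnterFrame() { ++frame_depth; }
      void LeaveFrame() { --frame_depth; }
    };

    class ScopedFrame {
     public:
      explicit ScopedFrame(Assembler* m) : masm_(m) { masm_->EnterFrame(); }
      ~ScopedFrame() { masm_->LeaveFrame(); }  // runs on every exit path
      ScopedFrame(const ScopedFrame&) = delete;
      ScopedFrame& operator=(const ScopedFrame&) = delete;
     private:
      Assembler* masm_;
    };

    void EmitWithFrame(Assembler* masm) {
      ScopedFrame scope(masm);
      // ... emit code that requires a frame ...
    }  // frame torn down here, matching the closing brace in the diff
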
 
 
@@ -464,68 +481,68 @@
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function.
-  __ push(edi);
-  // Push call kind information.
-  __ push(ecx);
+    // Push a copy of the function.
+    __ push(edi);
+    // Push call kind information.
+    __ push(ecx);
 
-  __ push(edi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
+    __ push(edi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
 
-  // Restore call kind information.
-  __ pop(ecx);
-  // Restore receiver.
-  __ pop(edi);
+    // Restore call kind information.
+    __ pop(ecx);
+    // Restore receiver.
+    __ pop(edi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(edi);
-  // Push call kind information.
-  __ push(ecx);
+    // Push a copy of the function onto the stack.
+    __ push(edi);
+    // Push call kind information.
+    __ push(ecx);
 
-  __ push(edi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
+    __ push(edi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore call kind information.
-  __ pop(ecx);
-  // Restore receiver.
-  __ pop(edi);
+    // Restore call kind information.
+    __ pop(ecx);
+    // Restore receiver.
+    __ pop(edi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }
 
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Pass the function and deoptimization type to the runtime system.
-  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    // Pass the deoptimization type to the runtime system.
+    __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Get the full codegen state from the stack and untag it.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -566,9 +583,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ pushad();
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ popad();
   __ ret(0);
 }
@@ -579,7 +597,7 @@
 
   // 1. Make sure we have at least one argument.
   { Label done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
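
The instructions above splice a default receiver in underneath the return
address: on ia32 the return address sits on top of the arguments, so the stub
pops it, pushes undefined, and (in context cut off by this hunk) pushes the
return address back and bumps the argument count. The same maneuver on a
grow-upward C++ stack, names illustrative:

    #include <cstdint>
    #include <vector>

    // Insert `value` directly beneath the top-of-stack word, mirroring the
    // pop(ebx) / push(undefined_value) / push(ebx) sequence in the stub.
    void PushUnderTop(std::vector<uint32_t>* stack, uint32_t value) {
      uint32_t return_address = stack->back();  // pop ebx
      stack->pop_back();
      stack->push_back(value);                  // push undefined
      stack->push_back(return_address);         // push ebx
    }
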
@@ -631,18 +649,21 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ SmiTag(eax);
-    __ push(eax);
 
-    __ push(ebx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(ebx, eax);
-    __ Set(edx, Immediate(0));  // restore
+    {  // In order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(eax);
+      __ push(eax);
 
-    __ pop(eax);
-    __ SmiUntag(eax);
-    __ LeaveInternalFrame();
+      __ push(ebx);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(ebx, eax);
+      __ Set(edx, Immediate(0));  // restore
+
+      __ pop(eax);
+      __ SmiUntag(eax);
+    }
+
     // Restore the function to edi.
     __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
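
Why the SmiTag/push and pop/SmiUntag bracket around the TO_OBJECT call: while
the builtin runs inside the frame, a GC may walk the stack, and on ia32 heap
pointers are distinguished by a set low tag bit. Tagging the raw count as a
smi clears its low bit, so the collector cannot misread it as a pointer. A
self-checking sketch, assuming the conventional kSmiTag == 0 and
kHeapObjectTag == 1:

    #include <cassert>
    #include <cstdint>

    bool LooksLikeHeapObject(uint32_t word) { return (word & 1) != 0; }

    int main() {
      uint32_t raw_count = 3;            // 0b11: would scan as a heap pointer
      uint32_t tagged = raw_count << 1;  // smi-tagged: low bit clear
      assert(LooksLikeHeapObject(raw_count));
      assert(!LooksLikeHeapObject(tagged));
      return 0;
    }
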
@@ -695,22 +716,23 @@
   // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
   //     or a function proxy via CALL_FUNCTION_PROXY.
   { Label function, non_proxy;
-    __ test(edx, Operand(edx));
+    __ test(edx, edx);
     __ j(zero, &function);
     __ Set(ebx, Immediate(0));
-    __ SetCallKind(ecx, CALL_AS_METHOD);
-    __ cmp(Operand(edx), Immediate(1));
+    __ cmp(edx, Immediate(1));
     __ j(not_equal, &non_proxy);
 
     __ pop(edx);   // return address
     __ push(edi);  // re-add proxy object as additional argument
     __ push(edx);
     __ inc(eax);
+    __ SetCallKind(ecx, CALL_AS_FUNCTION);
     __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
 
     __ bind(&non_proxy);
+    __ SetCallKind(ecx, CALL_AS_METHOD);
     __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
@@ -726,13 +748,13 @@
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
   __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
 
   ParameterCount expected(0);
-  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
-                NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
+                CALL_AS_METHOD);
 }
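
The tail above is the fast path of the call sequence: when the actual
argument count in eax equals the formal count from the SharedFunctionInfo,
the code entry is invoked directly; any mismatch detours through the
ArgumentsAdaptorTrampoline, whose padding loops appear in the hunks further
down. The decision, as a trivial sketch with an illustrative sentinel value
standing in for V8's real kDontAdaptArgumentsSentinel:

    enum class CallPath { kDirect, kArgumentsAdaptor };

    const int kDontAdaptSentinel = -1;  // illustrative stand-in

    CallPath ChooseCallPath(int actual, int expected) {
      if (expected == kDontAdaptSentinel || actual == expected)
        return CallPath::kDirect;
      return CallPath::kArgumentsAdaptor;
    }
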
 
 
@@ -740,163 +762,160 @@
   static const int kArgumentsOffset = 2 * kPointerSize;
   static const int kReceiverOffset = 3 * kPointerSize;
   static const int kFunctionOffset = 4 * kPointerSize;
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-  __ EnterInternalFrame();
+    __ push(Operand(ebp, kFunctionOffset));  // push this
+    __ push(Operand(ebp, kArgumentsOffset));  // push arguments
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  __ push(Operand(ebp, kFunctionOffset));  // push this
-  __ push(Operand(ebp, kArgumentsOffset));  // push arguments
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    ExternalReference real_stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ mov(edi, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already be overflowed
+    // here which will cause ecx to become negative.
+    __ mov(ecx, esp);
+    __ sub(ecx, edi);
+    // Make edx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ mov(edx, eax);
+    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ecx, edx);
+    __ j(greater, &okay);  // Signed comparison.
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  ExternalReference real_stack_limit =
-      ExternalReference::address_of_real_stack_limit(masm->isolate());
-  __ mov(edi, Operand::StaticVariable(real_stack_limit));
-  // Make ecx the space we have left. The stack might already be overflowed
-  // here which will cause ecx to become negative.
-  __ mov(ecx, Operand(esp));
-  __ sub(ecx, Operand(edi));
-  // Make edx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ mov(edx, Operand(eax));
-  __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
-  // Check if the arguments will overflow the stack.
-  __ cmp(ecx, Operand(edx));
-  __ j(greater, &okay);  // Signed comparison.
+    // Out of stack space.
+    __ push(Operand(ebp, 4 * kPointerSize));  // push this
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+    // End of stack check.
 
-  // Out of stack space.
-  __ push(Operand(ebp, 4 * kPointerSize));  // push this
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
+    // Push current index and limit.
+    const int kLimitOffset =
+        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+    __ push(eax);  // limit
+    __ push(Immediate(0));  // index
 
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(eax);  // limit
-  __ push(Immediate(0));  // index
+    // Get the receiver.
+    __ mov(ebx, Operand(ebp, kReceiverOffset));
 
-  // Get the receiver.
-  __ mov(ebx, Operand(ebp, kReceiverOffset));
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ mov(edi, Operand(ebp, kFunctionOffset));
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(not_equal, &push_receiver);
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ mov(edi, Operand(ebp, kFunctionOffset));
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &push_receiver);
+    // Change context eagerly to get the right global object if necessary.
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-  // Change context eagerly to get the right global object if necessary.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+    __ j(not_equal, &push_receiver);
 
-  // Compute the receiver.
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, &push_receiver);
+    Factory* factory = masm->isolate()->factory();
 
-  Factory* factory = masm->isolate()->factory();
+    // Do not transform the receiver for natives (shared already in ecx).
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+              1 << SharedFunctionInfo::kNativeBitWithinByte);
+    __ j(not_equal, &push_receiver);
 
-  // Do not transform the receiver for natives (shared already in ecx).
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-            1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, &push_receiver);
+    // Compute the receiver in non-strict mode.
+    // Call ToObject on the receiver if it is not an object, or use the
+    // global object if it is null or undefined.
+    __ JumpIfSmi(ebx, &call_to_object);
+    __ cmp(ebx, factory->null_value());
+    __ j(equal, &use_global_receiver);
+    __ cmp(ebx, factory->undefined_value());
+    __ j(equal, &use_global_receiver);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  // Call ToObject on the receiver if it is not an object, or use the
-  // global object if it is null or undefined.
-  __ JumpIfSmi(ebx, &call_to_object);
-  __ cmp(ebx, factory->null_value());
-  __ j(equal, &use_global_receiver);
-  __ cmp(ebx, factory->undefined_value());
-  __ j(equal, &use_global_receiver);
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &push_receiver);
+    __ bind(&call_to_object);
+    __ push(ebx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(ebx, eax);
+    __ jmp(&push_receiver);
 
-  __ bind(&call_to_object);
-  __ push(ebx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ mov(ebx, Operand(eax));
-  __ jmp(&push_receiver);
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+    __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ mov(ebx, FieldOperand(esi, kGlobalOffset));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
-  __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    // Push the receiver.
+    __ bind(&push_receiver);
+    __ push(ebx);
 
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(ebx);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ mov(eax, Operand(ebp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
+    // Use inline caching to speed up access to arguments.
+    Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // It is important that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to indicate that
+    // we have generated an inline version of the keyed load.  In this
+    // case, we know that we are not generating a test instruction next.
 
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
+    // Push the nth argument.
+    __ push(eax);
 
-  // Push the nth argument.
-  __ push(eax);
+    // Update the index on the stack and in register eax.
+    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ add(eax, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), eax);
 
-  // Update the index on the stack and in register eax.
-  __ mov(eax, Operand(ebp, kIndexOffset));
-  __ add(Operand(eax), Immediate(1 << kSmiTagSize));
-  __ mov(Operand(ebp, kIndexOffset), eax);
+    __ bind(&entry);
+    __ cmp(eax, Operand(ebp, kLimitOffset));
+    __ j(not_equal, &loop);
 
-  __ bind(&entry);
-  __ cmp(eax, Operand(ebp, kLimitOffset));
-  __ j(not_equal, &loop);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(eax);
+    __ SmiUntag(eax);
+    __ mov(edi, Operand(ebp, kFunctionOffset));
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(not_equal, &call_proxy);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(eax);
-  __ SmiUntag(eax);
-  __ mov(edi, Operand(ebp, kFunctionOffset));
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &call_proxy);
-  __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    frame_scope.GenerateLeaveFrame();
+    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(edi);  // add function proxy as last argument
+    __ inc(eax);
+    __ Set(ebx, Immediate(0));
+    __ SetCallKind(ecx, CALL_AS_METHOD);
+    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(edi);  // add function proxy as last argument
-  __ inc(eax);
-  __ Set(ebx, Immediate(0));
-  __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-
-  __ LeaveInternalFrame();
+    // Leave internal frame.
+  }
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
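
The stack check near the top of this apply() helper is worth unpacking: the
headroom is esp minus the "real" stack limit, and the bytes needed are the
smi-tagged argument count shifted left by kPointerSizeLog2 - kSmiTagSize. One
shift suffices because the smi tag already contributes a factor of two. A
sketch of the arithmetic under the ia32 constants (kPointerSize == 4,
kSmiTagSize == 1):

    #include <cstdint>

    bool ArgumentsFitOnStack(uintptr_t esp, uintptr_t real_stack_limit,
                             int32_t smi_tagged_argc) {
      // May be negative if the stack has already overflowed, hence signed.
      intptr_t headroom = static_cast<intptr_t>(esp - real_stack_limit);
      // smi == argc * 2, so one more doubling gives argc * kPointerSize.
      intptr_t bytes_needed = smi_tagged_argc << (2 - 1);
      return headroom > bytes_needed;  // signed compare, as in the stub
    }
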
 
 
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. If the parameter initial_capacity is larger than zero an elements
 // backing store is allocated with this size and filled with the hole values.
@@ -907,10 +926,9 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity >= 0);
-
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
   // Load the initial map from the array function.
   __ mov(scratch1, FieldOperand(array_function,
                                 JSFunction::kPrototypeOrInitialMapOffset));
@@ -968,7 +986,6 @@
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
-  STATIC_ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -980,13 +997,17 @@
     }
   } else {
     Label loop, entry;
+    __ mov(scratch2, Immediate(initial_capacity));
     __ jmp(&entry);
     __ bind(&loop);
-    __ mov(Operand(scratch1, 0), factory->the_hole_value());
-    __ add(Operand(scratch1), Immediate(kPointerSize));
+    __ mov(FieldOperand(scratch1,
+                        scratch2,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize),
+           factory->the_hole_value());
     __ bind(&entry);
-    __ cmp(scratch1, Operand(scratch2));
-    __ j(below, &loop);
+    __ dec(scratch2);
+    __ j(not_sign, &loop);
   }
 }
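
The rewritten fill loop above trades the old pointer-bumping loop for an
indexed countdown: scratch2 starts at initial_capacity, is decremented before
each iteration, and the loop exits once the decrement drives it negative
(the not_sign condition). In C++ terms:

    #include <cstdint>

    // Equivalent of the dec/not_sign countdown: store the hole sentinel at
    // indices capacity-1 down to 0.
    void FillWithHole(uint32_t* elements, int capacity, uint32_t hole) {
      for (int i = capacity - 1; i >= 0; --i) {
        elements[i] = hole;
      }
    }
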
 
@@ -1082,7 +1103,7 @@
     __ bind(&loop);
     __ stos();
     __ bind(&entry);
-    __ cmp(edi, Operand(elements_array_end));
+    __ cmp(edi, elements_array_end);
     __ j(below, &loop);
     __ bind(&done);
   }
@@ -1120,7 +1141,7 @@
   __ push(eax);
 
   // Check for array construction with zero arguments.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &argc_one_or_more);
 
   __ bind(&empty_array);
@@ -1131,7 +1152,6 @@
                        ebx,
                        ecx,
                        edi,
-                       kPreallocatedArrayElements,
                        &prepare_generic_code_call);
   __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
   __ pop(ebx);
@@ -1147,7 +1167,7 @@
   __ j(not_equal, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &not_empty_array);
 
   // The single argument passed is zero, so we jump to the code above used to
@@ -1160,7 +1180,7 @@
     __ mov(eax, Operand(esp, i * kPointerSize));
     __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
   }
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // Drop two stack slots.
+  __ Drop(2);  // Drop two stack slots.
   __ push(Immediate(0));  // Treat this as a call with argc of zero.
   __ jmp(&empty_array);
 
@@ -1250,7 +1270,7 @@
   __ bind(&loop);
   __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
   __ mov(Operand(edx, 0), eax);
-  __ add(Operand(edx), Immediate(kPointerSize));
+  __ add(edx, Immediate(kPointerSize));
   __ bind(&entry);
   __ dec(ecx);
   __ j(greater_equal, &loop);
@@ -1356,14 +1376,14 @@
 
   if (FLAG_debug_code) {
     __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
-    __ cmp(edi, Operand(ecx));
+    __ cmp(edi, ecx);
     __ Assert(equal, "Unexpected String function");
   }
 
   // Load the first argument into eax and get rid of the rest
   // (including the receiver).
   Label no_arguments;
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &no_arguments);
   __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
   __ pop(ecx);
@@ -1439,12 +1459,13 @@
   // Invoke the conversion builtin and put the result into ebx.
   __ bind(&convert_argument);
   __ IncrementCounter(counters->string_ctor_conversions(), 1);
-  __ EnterInternalFrame();
-  __ push(edi);  // Preserve the function.
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ pop(edi);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(edi);  // Preserve the function.
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+    __ pop(edi);
+  }
   __ mov(ebx, eax);
   __ jmp(&argument_is_string);
 
@@ -1461,17 +1482,18 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1);
-  __ EnterInternalFrame();
-  __ push(ebx);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(ebx);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ ret(0);
 }
 
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // Store the arguments adaptor context sentinel.
   __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1515,7 +1537,7 @@
   __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
 
   Label enough, too_few;
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(less, &too_few);
   __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ j(equal, &dont_adapt_arguments);
@@ -1533,8 +1555,8 @@
     __ bind(&copy);
     __ inc(edi);
     __ push(Operand(eax, 0));
-    __ sub(Operand(eax), Immediate(kPointerSize));
-    __ cmp(edi, Operand(ebx));
+    __ sub(eax, Immediate(kPointerSize));
+    __ cmp(edi, ebx);
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1547,17 +1569,17 @@
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
     // ebx = expected - actual.
-    __ sub(ebx, Operand(eax));
+    __ sub(ebx, eax);
     // eax = -actual - 1
     __ neg(eax);
-    __ sub(Operand(eax), Immediate(1));
+    __ sub(eax, Immediate(1));
 
     Label copy;
     __ bind(&copy);
     __ inc(eax);
     __ push(Operand(edi, 0));
-    __ sub(Operand(edi), Immediate(kPointerSize));
-    __ test(eax, Operand(eax));
+    __ sub(edi, Immediate(kPointerSize));
+    __ test(eax, eax);
     __ j(not_zero, &copy);
 
     // Fill remaining expected arguments with undefined values.
@@ -1565,7 +1587,7 @@
     __ bind(&fill);
     __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(eax, Operand(ebx));
+    __ cmp(eax, ebx);
     __ j(less, &fill);
   }
 
@@ -1573,7 +1595,7 @@
   __ bind(&invoke);
   // Restore function pointer.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ call(Operand(edx));
+  __ call(edx);
 
   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
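
The copy and fill loops above are the whole point of the adaptor frame: the
"too few" path pushes the actual arguments and then pads with undefined until
the expected count is met. The same effect in plain C++, with undefined
modeled as a string for readability:

    #include <string>
    #include <vector>

    // Sketch of the too_few path: actual arguments first, then padding.
    std::vector<std::string> AdaptArguments(std::vector<std::string> actual,
                                            size_t expected) {
      while (actual.size() < expected) actual.push_back("undefined");
      return actual;
    }
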
@@ -1583,13 +1605,13 @@
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   CpuFeatures::TryForceFeatureScope scope(SSE2);
-  if (!CpuFeatures::IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
     return;
   }
@@ -1616,15 +1638,16 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  __ EnterInternalFrame();
-  __ push(eax);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(eax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
   Label skip;
-  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ cmp(eax, Immediate(Smi::FromInt(-1)));
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
@@ -1638,7 +1661,9 @@
   __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  __ Abort("Unreachable code: returned from tail call.");
+  if (FLAG_debug_code) {
+    __ Abort("Unreachable code: returned from tail call.");
+  }
   __ bind(&ok);
   __ ret(0);
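
Stepping back, the most common change in this file is purely mechanical:
cmp/add/sub/test/or_/jmp/call sites that wrapped a plain register as
Operand(reg) now pass the register directly. The plausible enabler, sketched
with illustrative signatures only, is a set of register-direct overloads on
the assembler, so that Operand is reserved for genuine memory operands:

    struct Register {};
    struct Immediate {};
    class Operand { /* encodes a memory reference or wrapped register */ };

    class Assembler {
     public:
      void cmp(Register dst, Register src) {}        // register-direct form
      void cmp(Register dst, const Operand& src) {}  // memory operands
      void cmp(Register dst, const Immediate& imm) {}
    };
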
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 8a5bd50..3a286f0 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -34,6 +34,8 @@
 #include "isolate.h"
 #include "jsregexp.h"
 #include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -49,7 +51,7 @@
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+  __ cmp(ebx, Immediate(factory->heap_number_map()));
   __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);
 
@@ -70,9 +72,9 @@
   // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -150,7 +152,7 @@
   }
 
   // Return and remove the on-stack parameter.
-  __ mov(esi, Operand(eax));
+  __ mov(esi, eax);
   __ ret(1 * kPointerSize);
 
   // Need to collect. Call into runtime system.
@@ -159,6 +161,139 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + (1 * kPointerSize)]: function
+  // [esp + (2 * kPointerSize)]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        eax, ebx, ecx, &gc, TAG_OBJECT);
+
+  // Get the function or sentinel from the stack.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+  // Get the serialized scope info from the stack.
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+
+  // Set up the object header.
+  Factory* factory = masm->isolate()->factory();
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         factory->block_context_map());
+  __ mov(FieldOperand(eax, Context::kLengthOffset),
+         Immediate(Smi::FromInt(length)));
+
+  // If this block context is nested in the global context we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ cmp(ecx, 0);
+    __ Assert(equal, message);
+  }
+  __ mov(ecx, GlobalObjectOperand());
+  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
+  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
+  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
+
+  // Copy the global object from the previous context.
+  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
+
+  // Initialize the rest of the slots to the hole value.
+  if (slots_ == 1) {
+    __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
+           factory->the_hole_value());
+  } else {
+    __ mov(ebx, factory->the_hole_value());
+    for (int i = 0; i < slots_; i++) {
+      __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
+    }
+  }
+
+  // Return and remove the on-stack parameters.
+  __ mov(esi, eax);
+  __ ret(2 * kPointerSize);
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  // ecx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ mov(ebx, FieldOperand(ecx, i));
+      __ mov(FieldOperand(eax, i), ebx);
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ lea(edx, Operand(eax, JSArray::kSize));
+    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+    // Copy the elements array.
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+      for (int i = 0; i < elements_size; i += kPointerSize) {
+        __ mov(ebx, FieldOperand(ecx, i));
+        __ mov(FieldOperand(edx, i), ebx);
+      }
+    } else {
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+      int i;
+      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+        __ mov(ebx, FieldOperand(ecx, i));
+        __ mov(FieldOperand(edx, i), ebx);
+      }
+      while (i < elements_size) {
+        __ fld_d(FieldOperand(ecx, i));
+        __ fstp_d(FieldOperand(edx, i));
+        i += kDoubleSize;
+      }
+      ASSERT(i == elements_size);
+    }
+  }
+}
+
+
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -166,13 +301,8 @@
   // [esp + (2 * kPointerSize)]: literal index.
   // [esp + (3 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into ecx and check if we need to create a
   // boilerplate.
-  Label slow_case;
   __ mov(ecx, Operand(esp, 3 * kPointerSize));
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   STATIC_ASSERT(kPointerSize == 4);
@@ -182,16 +312,43 @@
                            FixedArray::kHeaderSize));
   Factory* factory = masm->isolate()->factory();
   __ cmp(ecx, factory->undefined_value());
+  Label slow_case;
   __ j(equal, &slow_case);
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // ecx is boilerplate object.
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ CheckMap(ebx, factory->fixed_cow_array_map(),
+                &check_fast_elements, DONT_DO_SMI_CHECK);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ CheckMap(ebx, factory->fixed_array_map(),
+                &double_elements, DONT_DO_SMI_CHECK);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Handle<Map> expected_map;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map = factory->fixed_array_map();
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map = factory->fixed_double_array_map();
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map = factory->fixed_cow_array_map();
     }
@@ -202,32 +359,7 @@
     __ pop(ecx);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ mov(ebx, FieldOperand(ecx, i));
-      __ mov(FieldOperand(eax, i), ebx);
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
-    __ lea(edx, Operand(eax, JSArray::kSize));
-    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
-    // Copy the elements array.
-    for (int i = 0; i < elements_size; i += kPointerSize) {
-      __ mov(ebx, FieldOperand(ecx, i));
-      __ mov(FieldOperand(edx, i), ebx);
-    }
-  }
-
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);
 
@@ -236,9 +368,57 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + kPointerSize]: object literal flags.
+  // [esp + (2 * kPointerSize)]: constant properties.
+  // [esp + (3 * kPointerSize)]: literal index.
+  // [esp + (4 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into ecx and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ mov(ecx, Operand(esp, 4 * kPointerSize));
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  STATIC_ASSERT(kPointerSize == 4);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+  Factory* factory = masm->isolate()->factory();
+  __ cmp(ecx, factory->undefined_value());
+  __ j(equal, &slow_case);
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
+  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
+  __ j(not_equal, &slow_case);
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ mov(ebx, FieldOperand(ecx, i));
+    __ mov(FieldOperand(eax, i), ebx);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ ret(4 * kPointerSize);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
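
The guard in the new FastCloneShallowObjectStub leans on a representation
detail: a map records its instance size in pointer-sized words, so the byte
size computed from length_ must be shifted down by kPointerSizeLog2 before
the comparison. A sketch, assuming kPointerSize == 4:

    #include <cstdint>

    bool CanCloneInline(int header_size_bytes, int in_object_properties,
                        uint8_t map_instance_size_in_words) {
      int byte_size = header_size_bytes + in_object_properties * 4;
      return (byte_size >> 2) == map_instance_size_in_words;
    }
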
+
+
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   Factory* factory = masm->isolate()->factory();
   const Register argument = eax;
@@ -336,6 +516,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ pushad();
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      __ movdbl(Operand(esp, i * kDoubleSize), reg);
+    }
+  }
+  const int argument_count = 1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, ecx);
+  __ mov(Operand(esp, 0 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      __ movdbl(reg, Operand(esp, i * kDoubleSize));
+    }
+    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  __ popad();
+  __ ret(0);
+}
+
+
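
StoreBufferOverflowStub is the slow path of the write barrier. As a hedged
sketch of the surrounding machinery (the buffer size and hook name below are
illustrative): pointer stores append the written slot's address to a
fixed-size buffer, and when the buffer fills, control reaches the runtime
through this stub. Since that processing must not itself allocate or trigger
GC, the stub can save every register wholesale with pushad.

    #include <cstddef>
    #include <cstdint>

    const size_t kStoreBufferSize = 1024;
    uintptr_t store_buffer[kStoreBufferSize];
    size_t store_buffer_top = 0;

    void StoreBufferOverflow() { /* drain/compact; must not allocate */ }

    // Write barrier: remember which slot just received a pointer.
    void RecordWrite(uintptr_t* slot, uintptr_t value) {
      *slot = value;
      store_buffer[store_buffer_top++] = reinterpret_cast<uintptr_t>(slot);
      if (store_buffer_top == kStoreBufferSize) {
        StoreBufferOverflow();
        store_buffer_top = 0;
      }
    }
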
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -470,27 +685,27 @@
     // Check whether the exponent is too big for a 64 bit signed integer.
     static const uint32_t kTooBigExponent =
         (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+    __ cmp(scratch2, Immediate(kTooBigExponent));
     __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
     // Reserve space for 64 bit answer.
-    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
     // Do conversion, which cannot fail because we checked the exponent.
     __ fisttp_d(Operand(esp, 0));
     __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
   } else {
     // Load ecx with zero.  We use this either for the final shift or
     // for the answer.
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     // Check whether the exponent matches a 32 bit signed int that cannot be
     // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
     // exponent is 30 (biased).  This is the exponent that we are fastest at and
     // also the highest exponent we can handle here.
     const uint32_t non_smi_exponent =
         (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+    __ cmp(scratch2, Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
     __ j(equal, &right_exponent, Label::kNear);
@@ -503,7 +718,7 @@
       // >>> operator has a tendency to generate numbers with an exponent of 31.
       const uint32_t big_non_smi_exponent =
           (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+      __ cmp(scratch2, Immediate(big_non_smi_exponent));
       __ j(not_equal, conversion_failure);
       // We have the big exponent, typically from >>>.  This means the number is
       // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
@@ -522,9 +737,9 @@
       // Shift down 21 bits to get the most significant 11 bits or the low
       // mantissa word.
       __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, Operand(scratch2));
+      __ or_(ecx, scratch2);
       // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, Operand(scratch));
+      __ test(scratch, scratch);
       __ j(positive, &done, Label::kNear);
       __ neg(ecx);
       __ jmp(&done, Label::kNear);
@@ -538,14 +753,14 @@
     // it rounds to zero.
     const uint32_t zero_exponent =
         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(Operand(scratch2), Immediate(zero_exponent));
+    __ sub(scratch2, Immediate(zero_exponent));
     // ecx already has a Smi zero.
     __ j(less, &done, Label::kNear);
 
     // We have a shifted exponent between 0 and 30 in scratch2.
     __ shr(scratch2, HeapNumber::kExponentShift);
     __ mov(ecx, Immediate(30));
-    __ sub(ecx, Operand(scratch2));
+    __ sub(ecx, scratch2);
 
     __ bind(&right_exponent);
     // Here ecx is the shift, scratch is the exponent word.
@@ -565,19 +780,19 @@
     // Shift down 22 bits to get the most significant 10 bits or the low
     // mantissa word.
     __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, Operand(scratch));
+    __ or_(scratch2, scratch);
     // Move down according to the exponent.
     __ shr_cl(scratch2);
     // Now the unsigned answer is in scratch2.  We need to move it to ecx and
     // we may need to fix the sign.
     Label negative;
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
     __ j(greater, &negative, Label::kNear);
     __ mov(ecx, scratch2);
     __ jmp(&done, Label::kNear);
     __ bind(&negative);
-    __ sub(ecx, Operand(scratch2));
+    __ sub(ecx, scratch2);
     __ bind(&done);
   }
 }
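
Most edits in this IntegerConvert block are the same Operand cleanup, but the
logic they sit in rewards a closer look: the heap number is classified purely
by its IEEE-754 biased exponent (bias 1023). An exponent of bias+63 means the
value will not fit a signed 64-bit integer, bias+30 and bias+31 are the int32
and >>> fast cases, and anything below bias+0 rounds to zero. A standalone
check of that exponent arithmetic:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Extract the 11-bit biased exponent of an IEEE-754 double.
    int BiasedExponent(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<int>((bits >> 52) & 0x7FF);
    }

    int main() {
      const int kBias = 1023;  // HeapNumber::kExponentBias
      assert(BiasedExponent(1.0) == kBias + 0);
      assert(BiasedExponent(2.0) == kBias + 1);
      assert(BiasedExponent(1073741824.0) == kBias + 30);  // 2^30
      return 0;
    }
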
@@ -679,13 +894,13 @@
   __ JumpIfNotSmi(eax, non_smi, non_smi_near);
 
   // We can't handle -0 with smis, so use a type transition for that case.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, slow, slow_near);
 
   // Try optimistic subtraction '0 - value', saving operand in eax for undo.
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ Set(eax, Immediate(0));
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
   __ j(overflow, undo, undo_near);
   __ ret(0);
 }
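
Two subtleties in the unary-minus fast path above: smi zero is rejected up
front because negating it should yield -0, which only a heap number can
represent; and the optimistic 0 - value subtraction overflows for exactly one
input, the most negative smi, whose tagged form is INT32_MIN. That is the
case the undo path catches. A check of that boundary (__builtin_sub_overflow
is a GCC/Clang builtin):

    #include <cassert>
    #include <cstdint>

    int main() {
      // SmiTag(-2^30) == INT32_MIN on ia32, so 0 - value sets the overflow
      // flag for the most negative smi and for nothing else.
      int32_t result;
      assert(__builtin_sub_overflow(INT32_C(0), INT32_MIN, &result));
      assert(!__builtin_sub_overflow(INT32_C(0), INT32_C(6), &result));
      return 0;
    }
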
@@ -706,7 +921,7 @@
 
 
 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
 }
 
 
@@ -760,7 +975,7 @@
     __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
             Immediate(HeapNumber::kSignMask));  // Flip sign.
   } else {
-    __ mov(edx, Operand(eax));
+    __ mov(edx, eax);
     // edx: operand
 
     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -768,11 +983,12 @@
     __ jmp(&heapnumber_allocated, Label::kNear);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(edx);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ pop(edx);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(edx);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ pop(edx);
+    }
 
     __ bind(&heapnumber_allocated);
     // eax: allocated 'empty' number
@@ -815,15 +1031,16 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    // Push the original HeapNumber on the stack. The integer value can't
-    // be stored since it's untagged and not in the smi range (so we can't
-    // smi-tag it). We'll recalculate the value after the GC instead.
-    __ push(ebx);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    // New HeapNumber is in eax.
-    __ pop(edx);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Push the original HeapNumber on the stack. The integer value can't
+      // be stored since it's untagged and not in the smi range (so we can't
+      // smi-tag it). We'll recalculate the value after the GC instead.
+      __ push(ebx);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      // New HeapNumber is in eax.
+      __ pop(edx);
+    }
     // IntegerConvert uses ebx and edi as scratch registers.
     // This conversion won't go slow-case.
     IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
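
[Editor's note] Throughout this file the explicit EnterInternalFrame/LeaveInternalFrame pairs are replaced by a block-scoped FrameScope, so the frame is torn down when the scope closes. A rough sketch of the RAII shape, assuming MacroAssembler exposes EnterFrame/LeaveFrame as in this tree (the class name is illustrative):

// RAII frame management: constructing the scope emits the frame setup,
// destroying it emits the teardown, so the two can no longer get unbalanced.
class ScopedInternalFrame {
 public:
  explicit ScopedInternalFrame(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterFrame(StackFrame::INTERNAL);
  }
  ~ScopedInternalFrame() { masm_->LeaveFrame(StackFrame::INTERNAL); }

 private:
  MacroAssembler* masm_;
};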
@@ -833,7 +1050,7 @@
   }
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ cvtsi2sd(xmm0, Operand(ecx));
+    __ cvtsi2sd(xmm0, ecx);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   } else {
     __ push(ecx);
@@ -947,6 +1164,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1022,7 +1243,7 @@
       // eax in case the result is not a smi.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      __ or_(right, left);  // Bitwise or is commutative.
       combined = right;
       break;
 
@@ -1034,7 +1255,7 @@
     case Token::DIV:
     case Token::MOD:
       __ mov(combined, right);
-      __ or_(combined, Operand(left));
+      __ or_(combined, left);
       break;
 
     case Token::SHL:
@@ -1044,7 +1265,7 @@
       // for the smi check register.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));
+      __ or_(right, left);
       combined = right;
       break;
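
[Editor's note] The mov/or_ sequences above fold the two operands into one word for a single smi check: with kSmiTag == 0, the tag bit of left | right is clear only if both operands are smis. The same predicate in portable C++ (a sketch; the constant mirrors V8's 31-bit smi layout):

#include <cstdint>

bool BothSmis(uint32_t left, uint32_t right) {
  const uint32_t kSmiTagMask = 1;  // assumed: smis carry a 0 in the low bit
  return ((left | right) & kSmiTagMask) == 0;
}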
 
@@ -1067,12 +1288,12 @@
 
     case Token::BIT_XOR:
       ASSERT(right.is(eax));
-      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      __ xor_(right, left);  // Bitwise xor is commutative.
       break;
 
     case Token::BIT_AND:
       ASSERT(right.is(eax));
-      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      __ and_(right, left);  // Bitwise and is commutative.
       break;
 
     case Token::SHL:
@@ -1121,12 +1342,12 @@
 
     case Token::ADD:
       ASSERT(right.is(eax));
-      __ add(right, Operand(left));  // Addition is commutative.
+      __ add(right, left);  // Addition is commutative.
       __ j(overflow, &use_fp_on_smis);
       break;
 
     case Token::SUB:
-      __ sub(left, Operand(right));
+      __ sub(left, right);
       __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
@@ -1140,7 +1361,7 @@
       // Remove tag from one of the operands (but keep sign).
       __ SmiUntag(right);
       // Do multiplication.
-      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ imul(right, left);  // Multiplication is commutative.
       __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
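
[Editor's note] For MUL above, only one operand is untagged: since a smi stores twice its value, (2a)*b = 2(ab) is already a correctly tagged product. A sketch on 32-bit values, including the negative-zero check done via combined = left | right (__builtin_mul_overflow is a GCC/Clang builtin):

#include <cstdint>

bool TrySmiMul(int32_t left_tagged, int32_t right_tagged, int32_t* result) {
  int32_t right_untagged = right_tagged >> 1;  // SmiUntag preserves the sign
  if (__builtin_mul_overflow(left_tagged, right_untagged, result)) return false;
  // 0 * negative must produce -0, which a smi cannot represent: bail out if
  // the product is zero but either operand was negative.
  if (*result == 0 && (left_tagged | right_tagged) < 0) return false;
  return true;
}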
@@ -1151,7 +1372,7 @@
       // save the left operand.
       __ mov(edi, left);
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -1167,7 +1388,7 @@
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
       // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
+      __ test(edx, edx);
       __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(eax);
@@ -1175,7 +1396,7 @@
 
     case Token::MOD:
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &not_smis);
 
       // Sign extend left into edx:eax.
@@ -1226,11 +1447,11 @@
         break;
       case Token::ADD:
         // Revert right = right + left.
-        __ sub(right, Operand(left));
+        __ sub(right, left);
         break;
       case Token::SUB:
         // Revert left = left - right.
-        __ add(left, Operand(right));
+        __ add(left, right);
         break;
       case Token::MUL:
         // Right was clobbered but a copy is in ebx.
@@ -1268,7 +1489,7 @@
           ASSERT_EQ(Token::SHL, op_);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatures::Scope use_sse2(SSE2);
-            __ cvtsi2sd(xmm0, Operand(left));
+            __ cvtsi2sd(xmm0, left);
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           } else {
             __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1290,11 +1511,11 @@
         switch (op_) {
           case Token::ADD:
             // Revert right = right + left.
-            __ sub(right, Operand(left));
+            __ sub(right, left);
             break;
           case Token::SUB:
             // Revert left = left - right.
-            __ add(left, Operand(right));
+            __ add(left, right);
             break;
           case Token::MUL:
             // Right was clobbered but a copy is in ebx.
@@ -1486,7 +1707,7 @@
         // Check result type if it is currently Int32.
         if (result_type_ <= BinaryOpIC::INT32) {
           __ cvttsd2si(ecx, Operand(xmm0));
-          __ cvtsi2sd(xmm2, Operand(ecx));
+          __ cvtsi2sd(xmm2, ecx);
           __ ucomisd(xmm0, xmm2);
           __ j(not_zero, &not_int32);
           __ j(carry, &not_int32);
@@ -1548,9 +1769,9 @@
       FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                         &not_int32);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1574,7 +1795,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1594,7 +1815,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1675,7 +1896,7 @@
   __ cmp(edx, factory->undefined_value());
   __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(edx, Operand(edx));
+    __ xor_(edx, edx);
   } else {
     __ mov(edx, Immediate(factory->nan_value()));
   }
@@ -1684,7 +1905,7 @@
   __ cmp(eax, factory->undefined_value());
   __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(eax, Operand(eax));
+    __ xor_(eax, eax);
   } else {
     __ mov(eax, Immediate(factory->nan_value()));
   }
@@ -1762,9 +1983,9 @@
                                                   use_sse3_,
                                                   &not_floats);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1788,7 +2009,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1808,7 +2029,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1961,9 +2182,9 @@
                                                   use_sse3_,
                                                   &call_runtime);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1987,7 +2208,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -2007,7 +2228,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2117,10 +2338,10 @@
       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
       // Now edx can be overwritten, losing one of the arguments, since we
       // are done and will not need it any more.
-      __ mov(edx, Operand(ebx));
+      __ mov(edx, ebx);
       __ bind(&skip_allocation);
       // Use object in edx as a result holder
-      __ mov(eax, Operand(edx));
+      __ mov(eax, edx);
       break;
     }
     case OVERWRITE_RIGHT:
@@ -2178,7 +2399,7 @@
     // Then load the low and high words of the double into ebx, edx.
     STATIC_ASSERT(kSmiTagSize == 1);
     __ sar(eax, 1);
-    __ sub(Operand(esp), Immediate(2 * kPointerSize));
+    __ sub(esp, Immediate(2 * kPointerSize));
     __ mov(Operand(esp, 0), eax);
     __ fild_s(Operand(esp, 0));
     __ fst_d(Operand(esp, 0));
@@ -2189,7 +2410,7 @@
     // Check if input is a HeapNumber.
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
     Factory* factory = masm->isolate()->factory();
-    __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+    __ cmp(ebx, Immediate(factory->heap_number_map()));
     __ j(not_equal, &runtime_call);
     // Input is a HeapNumber. Push it on the FPU stack and load its
     // low and high words into ebx, edx.
@@ -2201,12 +2422,12 @@
   } else {  // UNTAGGED.
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
-      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
+      __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
     } else {
       __ pshufd(xmm0, xmm1, 0x1);
-      __ movd(Operand(edx), xmm0);
+      __ movd(edx, xmm0);
     }
-    __ movd(Operand(ebx), xmm1);
+    __ movd(ebx, xmm1);
   }
 
   // ST[0] or xmm1  == double value
@@ -2215,15 +2436,15 @@
   // Compute hash (the shifts are arithmetic):
   //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
   __ mov(ecx, ebx);
-  __ xor_(ecx, Operand(edx));
+  __ xor_(ecx, edx);
   __ mov(eax, ecx);
   __ sar(eax, 16);
-  __ xor_(ecx, Operand(eax));
+  __ xor_(ecx, eax);
   __ mov(eax, ecx);
   __ sar(eax, 8);
-  __ xor_(ecx, Operand(eax));
+  __ xor_(ecx, eax);
   ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
-  __ and_(Operand(ecx),
+  __ and_(ecx,
           Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
 
   // ST[0] or xmm1 == double value.
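
[Editor's note] The sequence above computes the transcendental cache index from the double's two words; the comment's formula in C++ (a sketch; the stub uses arithmetic shifts, hence the signed intermediate):

#include <cstdint>

uint32_t CacheIndex(uint32_t low, uint32_t high, uint32_t cache_size) {
  int32_t h = static_cast<int32_t>(low ^ high);
  h ^= h >> 16;  // arithmetic shifts, matching the stub's sar
  h ^= h >> 8;
  return static_cast<uint32_t>(h) & (cache_size - 1);  // cache_size is a power of two
}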
@@ -2238,7 +2459,7 @@
   __ mov(eax, Operand(eax, cache_array_index));
   // Eax points to the cache for the type type_.
   // If NULL, the cache hasn't been initialized yet, so go through runtime.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &runtime_call_clear_stack);
 #ifdef DEBUG
   // Check that the layout of cache elements match expectations.
@@ -2264,6 +2485,8 @@
   __ cmp(edx, Operand(ecx, kIntSize));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
   __ mov(eax, Operand(ecx, 2 * kIntSize));
   if (tagged) {
     __ fstp(0);
@@ -2274,6 +2497,7 @@
   }
 
   __ bind(&cache_miss);
+  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
   // Update cache with new value.
   // We are short on registers, so use no_reg as scratch.
   // This gives slightly larger code.
@@ -2281,10 +2505,10 @@
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
+    __ add(esp, Immediate(kDoubleSize));
   }
   GenerateOperation(masm);
   __ mov(Operand(ecx, 0), ebx);
@@ -2299,20 +2523,21 @@
 
     // Skip cache and return answer directly, only in untagged case.
     __ bind(&skip_cache);
-    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
     GenerateOperation(masm);
     __ fstp_d(Operand(esp, 0));
     __ movdbl(xmm1, Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
+    __ add(esp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-    // Allocate an unused object bigger than a HeapNumber.
-    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Allocate an unused object bigger than a HeapNumber.
+      __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 
@@ -2329,10 +2554,11 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
-    __ EnterInternalFrame();
-    __ push(eax);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(eax);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2343,6 +2569,7 @@
   switch (type_) {
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -2356,7 +2583,9 @@
   // Input value is on FP stack, and also in ebx/edx.
   // Input value is possibly in xmm1.
   // Address of result (a newly allocated HeapNumber) may be in eax.
-  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+  if (type_ == TranscendentalCache::SIN ||
+      type_ == TranscendentalCache::COS ||
+      type_ == TranscendentalCache::TAN) {
     // fsin, fcos, and fptan all require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos/fptan operation.
@@ -2364,13 +2593,13 @@
     // If the argument is outside the range -2^63..2^63, fsin/fcos don't
     // work. We must reduce it to the appropriate range.
     __ mov(edi, edx);
-    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+    __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
     int supported_exponent_limit =
         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+    __ cmp(edi, Immediate(supported_exponent_limit));
     __ j(below, &in_range, Label::kNear);
     // Check for infinity and NaN. Both return NaN for these operations.
-    __ cmp(Operand(edi), Immediate(0x7ff00000));
+    __ cmp(edi, Immediate(0x7ff00000));
     Label non_nan_result;
     __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
@@ -2379,7 +2608,7 @@
     __ push(Immediate(0x7ff80000));
     __ push(Immediate(0));
     __ fld_d(Operand(esp, 0));
-    __ add(Operand(esp), Immediate(2 * kPointerSize));
+    __ add(esp, Immediate(2 * kPointerSize));
     __ jmp(&done, Label::kNear);
 
     __ bind(&non_nan_result);
@@ -2395,7 +2624,7 @@
       __ fwait();
       __ fnstsw_ax();
       // Clear if Illegal Operand or Zero Division exceptions are set.
-      __ test(Operand(eax), Immediate(5));
+      __ test(eax, Immediate(5));
       __ j(zero, &no_exceptions, Label::kNear);
       __ fnclex();
       __ bind(&no_exceptions);
@@ -2408,7 +2637,7 @@
       __ fprem1();
       __ fwait();
       __ fnstsw_ax();
-      __ test(Operand(eax), Immediate(0x400 /* C2 */));
+      __ test(eax, Immediate(0x400 /* C2 */));
       // If C2 is set, computation only has partial result. Loop to
       // continue computation.
       __ j(not_zero, &partial_remainder_loop);
@@ -2427,6 +2656,12 @@
       case TranscendentalCache::COS:
         __ fcos();
         break;
+      case TranscendentalCache::TAN:
+        // FPTAN replaces st(0) with its tangent and pushes 1.0 onto the
+        // FP register stack.
+        __ fptan();
+        __ fstp(0);  // Pop FP register stack.
+        break;
       default:
         UNREACHABLE();
     }
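
[Editor's note] GenerateOperation above guards fsin/fcos/fptan: inputs with exponent at or above 63 cannot be handled directly, infinities and NaNs produce NaN, and everything else is range-reduced via the fprem1 loop. A libm-based sketch of the same control flow (kTwoPi and std::remainder stand in for the x87 reduction; illustrative only):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

double TanWithRangeCheck(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  const uint32_t exponent_bits = static_cast<uint32_t>(bits >> 32) & 0x7FF00000u;
  const uint32_t kSupportedLimit = (63 + 1023) << 20;  // biased exponent for 2^63
  if (exponent_bits >= kSupportedLimit) {
    if (exponent_bits == 0x7FF00000u) {
      return std::numeric_limits<double>::quiet_NaN();  // Inf or NaN input
    }
    const double kTwoPi = 6.283185307179586;
    x = std::remainder(x, kTwoPi);  // stands in for the fprem1 loop
  }
  return std::tan(x);  // fptan, with the pushed 1.0 popped by fstp(0)
}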
@@ -2541,13 +2776,13 @@
 
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
 
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
 
   __ bind(&done);
@@ -2571,12 +2806,12 @@
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2592,11 +2827,11 @@
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm0, Operand(scratch));
+  __ cvtsi2sd(xmm0, scratch);
 
   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm1, Operand(scratch));
+  __ cvtsi2sd(xmm1, scratch);
 }
 
 
@@ -2604,12 +2839,12 @@
                                                     Label* non_int32,
                                                     Register scratch) {
   __ cvttsd2si(scratch, Operand(xmm0));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm0, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
   __ cvttsd2si(scratch, Operand(xmm1));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm1, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
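
[Editor's note] The int32 check above is a convert-and-compare roundtrip: truncate with cvttsd2si, convert back with cvtsi2sd, and compare; NaN is caught by the unordered (carry) branch. The same predicate in C++ (a sketch; the explicit bounds check keeps the cast defined, where cvttsd2si would instead yield the 0x80000000 indefinite value):

#include <cmath>
#include <cstdint>

bool IsInt32Double(double value) {
  if (std::isnan(value)) return false;  // the j(carry, ...) branch
  if (value < -2147483648.0 || value > 2147483647.0) return false;
  int32_t truncated = static_cast<int32_t>(value);  // cvttsd2si truncates toward zero
  return static_cast<double>(truncated) == value;   // cvtsi2sd + ucomisd
}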
@@ -2717,7 +2952,7 @@
 
   // Save 1 in xmm3 - we need this several times later on.
   __ mov(ecx, Immediate(1));
-  __ cvtsi2sd(xmm3, Operand(ecx));
+  __ cvtsi2sd(xmm3, ecx);
 
   Label exponent_nonsmi;
   Label base_nonsmi;
@@ -2728,7 +2963,7 @@
   // Optimized version when both exponent and base are smis.
   Label powi;
   __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ jmp(&powi);
   // exponent is smi and base is a heapnumber.
   __ bind(&base_nonsmi);
@@ -2770,11 +3005,11 @@
 
   // base has the original value of the exponent; if the exponent is
   // negative, return 1/result.
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ mov(ecx, Immediate(0x7FB00000));
-  __ movd(xmm0, Operand(ecx));
+  __ movd(xmm0, ecx);
   __ cvtss2sd(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
@@ -2797,7 +3032,7 @@
   Label handle_special_cases;
   __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
   __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ jmp(&handle_special_cases, Label::kNear);
 
   __ bind(&base_not_smi);
@@ -2806,7 +3041,7 @@
   __ j(not_equal, &call_runtime);
   __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
   __ and_(ecx, HeapNumber::kExponentMask);
-  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
   // base is NaN or +/-Infinity
   __ j(greater_equal, &call_runtime);
   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
@@ -2817,7 +3052,7 @@
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ mov(ecx, Immediate(0xBF000000));
-  __ movd(xmm2, Operand(ecx));
+  __ movd(xmm2, ecx);
   __ cvtss2sd(xmm2, xmm2);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
@@ -2873,13 +3108,13 @@
   Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor, Label::kNear);
 
   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -2895,7 +3130,7 @@
   // comparison to get negative check for free.
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, Operand(ecx));
+  __ cmp(edx, ecx);
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -2926,7 +3161,7 @@
   Label runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
@@ -2957,7 +3192,7 @@
   Label adaptor_frame, try_allocate;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
@@ -2976,7 +3211,7 @@
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less_equal, &try_allocate, Label::kNear);
   __ mov(ebx, ecx);
 
@@ -2990,7 +3225,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
   __ bind(&no_parameter_map);
@@ -2999,7 +3234,7 @@
   __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
 
   // 3. Arguments object.
-  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
   __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -3014,7 +3249,7 @@
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
   __ mov(edi, Operand(edi,
          Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3069,7 +3304,7 @@
 
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &skip_parameter_map);
 
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3093,7 +3328,7 @@
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ add(ebx, Operand(esp, 4 * kPointerSize));
-  __ sub(ebx, Operand(eax));
+  __ sub(ebx, eax);
   __ mov(ecx, FACTORY->the_hole_value());
   __ mov(edx, edi);
   __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3110,12 +3345,12 @@
   __ jmp(&parameters_test, Label::kNear);
 
   __ bind(&parameters_loop);
-  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+  __ sub(eax, Immediate(Smi::FromInt(1)));
   __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
   __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &parameters_loop, Label::kNear);
   __ pop(ecx);
 
@@ -3135,18 +3370,18 @@
   Label arguments_loop, arguments_test;
   __ mov(ebx, Operand(esp, 1 * kPointerSize));
   __ mov(edx, Operand(esp, 4 * kPointerSize));
-  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(Operand(edx), ebx);
+  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(edx, ebx);
   __ jmp(&arguments_test, Label::kNear);
 
   __ bind(&arguments_loop);
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ mov(eax, Operand(edx, 0));
   __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
 
   __ bind(&arguments_test);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less, &arguments_loop, Label::kNear);
 
   // Restore.
@@ -3174,7 +3409,7 @@
   Label adaptor_frame, try_allocate, runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // Get the length from the frame.
@@ -3193,11 +3428,11 @@
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
+  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3224,7 +3459,7 @@
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &done, Label::kNear);
 
   // Get the parameters pointer from the stack.
@@ -3246,8 +3481,8 @@
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
-  __ add(Operand(edi), Immediate(kPointerSize));
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ dec(ecx);
   __ j(not_zero, &loop);
 
@@ -3268,10 +3503,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  esp[0]: return address
@@ -3294,7 +3525,7 @@
   ExternalReference address_of_regexp_stack_memory_size =
       ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &runtime);
 
   // Check that the first argument is a JSRegExp object.
@@ -3315,7 +3546,7 @@
   // ecx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
   __ j(not_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3325,7 +3556,7 @@
   // uses the assumption that smis are 2 * their untagged value.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(Operand(edx), Immediate(2));  // edx was a smi.
+  __ add(edx, Immediate(2));  // edx was a smi.
   // Check that the static offsets vector buffer is large enough.
   __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
   __ j(above, &runtime);
@@ -3347,7 +3578,7 @@
   // string length. A negative value will be greater (unsigned comparison).
   __ mov(eax, Operand(esp, kPreviousIndexOffset));
   __ JumpIfNotSmi(eax, &runtime);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(above_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3367,8 +3598,8 @@
   // additional information.
   __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ SmiUntag(eax);
-  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
-  __ cmp(edx, Operand(eax));
+  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
+  __ cmp(edx, eax);
   __ j(greater, &runtime);
 
   // Reset offset for possibly sliced string.
@@ -3380,27 +3611,40 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
-  __ and_(ebx,
-          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+  __ and_(ebx, kIsNotStringMask |
+               kStringRepresentationMask |
+               kStringEncodingMask |
+               kShortExternalStringMask);
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ascii string.
-  __ and_(Operand(ebx),
-          Immediate(kIsNotStringMask | kStringRepresentationMask));
+  // Any other flat string must be a flat ascii string.  None of the following
+  // string type tests will succeed if subject is not a string or a short
+  // external string.
+  __ and_(ebx, Immediate(kIsNotStringMask |
+                         kStringRepresentationMask |
+                         kShortExternalStringMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
+  // ebx: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+  __ cmp(ebx, Immediate(kExternalStringTag));
   __ j(less, &cons_string);
-  __ j(equal, &runtime);
+  __ j(equal, &external_string);
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+  __ j(not_zero, &runtime);
 
   // String is sliced.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
@@ -3422,10 +3666,10 @@
             kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be ascii.
+  // Any other flat string must be sequential ascii or external.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask);
-  __ j(not_zero, &runtime);
+  __ j(not_zero, &external_string);
 
   __ bind(&seq_ascii_string);
   // eax: subject string (flat ascii)
@@ -3504,14 +3748,14 @@
   // Prepare start and end index of the input.
   // Load the length from the original sliced string if that is the case.
   __ mov(esi, FieldOperand(esi, String::kLengthOffset));
-  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
+  __ add(esi, edi);  // Calculate input end wrt offset.
   __ SmiUntag(edi);
-  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
+  __ add(ebx, edi);  // Calculate input start wrt offset.
 
   // ebx: start index of the input string
   // esi: end index of the input string
   Label setup_two_byte, setup_rest;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiUntag(esi);
   __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3531,8 +3775,8 @@
   __ bind(&setup_rest);
 
   // Locate the code entry and call it.
-  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ call(Operand(edx));
+  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ call(edx);
 
   // Drop arguments and come back to JS mode.
   __ LeaveApiExitFrame();
@@ -3553,11 +3797,9 @@
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
-  __ mov(edx,
-         Operand::StaticVariable(ExternalReference::the_hole_value_location(
-             masm->isolate())));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(eax, Operand::StaticVariable(pending_exception));
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(equal, &runtime);
   // For exception, throw the exception again.
 
@@ -3578,7 +3820,7 @@
 
   __ bind(&failure);
   // For failure to match, return null.
-  __ mov(Operand(eax), factory->null_value());
+  __ mov(eax, factory->null_value());
   __ ret(4 * kPointerSize);
 
   // Load RegExp data.
@@ -3589,7 +3831,7 @@
   // Calculate number of capture registers (number_of_captures + 1) * 2.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(Operand(edx), Immediate(2));  // edx was a smi.
+  __ add(edx, Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -3605,12 +3847,18 @@
   // Store last subject and last input.
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ mov(ecx, ebx);
-  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+  __ RecordWriteField(ebx,
+                      RegExpImpl::kLastSubjectOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs);
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ mov(ecx, ebx);
-  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+  __ RecordWriteField(ebx,
+                      RegExpImpl::kLastInputOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs);
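
[Editor's note] RecordWrite becomes RecordWriteField here: the new barrier takes the field offset directly plus a flag saying whether FP registers must be preserved, as required by the incremental marker this merge introduces. A heavily simplified conceptual sketch of a store with such a barrier (all names illustrative, not V8's API):

#include <unordered_set>

struct Heap {
  bool incremental_marking_active = false;
  std::unordered_set<void**> slots_to_revisit;
};

// Store a pointer into an object field and tell the collector about it, so
// an in-progress incremental mark cannot miss the new edge.
void StoreWithWriteBarrier(Heap* heap, void** field, void* value) {
  *field = value;
  if (heap->incremental_marking_active) {
    heap->slots_to_revisit.insert(field);
  }
}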
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -3624,7 +3872,7 @@
   // Capture register counter starts from the number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
-  __ sub(Operand(edx), Immediate(1));
+  __ sub(edx, Immediate(1));
   __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3642,6 +3890,27 @@
   __ mov(eax, Operand(esp, kLastMatchInfoOffset));
   __ ret(4 * kPointerSize);
 
+  // External string.  Short external strings have already been ruled out.
+  // eax: subject string (expected to be external)
+  // ebx: scratch
+  __ bind(&external_string);
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ test_b(ebx, kIsIndirectStringMask);
+    __ Assert(zero, "external string expected, but not found");
+  }
+  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ test_b(ebx, kStringEncodingMask);
+  __ j(not_zero, &seq_ascii_string);
+  __ jmp(&seq_two_byte_string);
+
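
[Editor's note] The new external_string block above makes an external string's character data usable by the sequential-string code paths: subtracting the sequential header size (minus the heap-object tag) from the resource-data pointer makes the offset-based loads line up. In pointer terms (constants are illustrative stand-ins for the V8 layout; the adjusted pointer deliberately points before the buffer, exactly as the stub's does):

#include <cstdint>

const int kHeapObjectTag = 1;         // assumed tagging scheme
const int kSeqStringHeaderSize = 12;  // illustrative header size

const uint8_t* AsFakeSequentialString(const uint8_t* resource_data) {
  return resource_data - (kSeqStringHeaderSize - kHeapObjectTag);
}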
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -3655,7 +3924,7 @@
   Label done;
   __ mov(ebx, Operand(esp, kPointerSize * 3));
   __ JumpIfNotSmi(ebx, &slowcase);
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
   __ j(above, &slowcase);
   // Smi-tagging is equivalent to multiplying by 2.
   STATIC_ASSERT(kSmiTag == 0);
@@ -3715,10 +3984,10 @@
   // ebx: Start of elements in FixedArray.
   // edx: the hole.
   Label loop;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ bind(&loop);
   __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
-  __ sub(Operand(ecx), Immediate(1));
+  __ sub(ecx, Immediate(1));
   __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
   __ jmp(&loop);
 
@@ -3743,16 +4012,16 @@
   Register scratch = scratch2;
 
   // Load the number string cache.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(masm->isolate());
   __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
   __ mov(number_string_cache,
-         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+         Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
   __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
   __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
-  __ sub(Operand(mask), Immediate(1));  // Make mask.
+  __ sub(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
   // number string cache for smis is just the smi value, and the hash for
@@ -3778,7 +4047,7 @@
     __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
     // Object is heap number and hash is now in scratch. Calculate cache index.
-    __ and_(scratch, Operand(mask));
+    __ and_(scratch, mask);
     Register index = scratch;
     Register probe = mask;
     __ mov(probe,
@@ -3804,7 +4073,7 @@
 
   __ bind(&smi_hash_calculated);
   // Object is smi and hash is now in scratch. Calculate cache index.
-  __ and_(scratch, Operand(mask));
+  __ and_(scratch, mask);
   Register index = scratch;
   // Check if the entry is the smi we are looking for.
   __ cmp(object,
@@ -3856,10 +4125,10 @@
   // Compare two smis if required.
   if (include_smi_compare_) {
     Label non_smi, smi_done;
-    __ mov(ecx, Operand(edx));
-    __ or_(ecx, Operand(eax));
+    __ mov(ecx, edx);
+    __ or_(ecx, eax);
     __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
-    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
+    __ sub(edx, eax);  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done, Label::kNear);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
@@ -3867,8 +4136,8 @@
     __ ret(0);
     __ bind(&non_smi);
   } else if (FLAG_debug_code) {
-    __ mov(ecx, Operand(edx));
-    __ or_(ecx, Operand(eax));
+    __ mov(ecx, edx);
+    __ or_(ecx, eax);
     __ test(ecx, Immediate(kSmiTagMask));
     __ Assert(not_zero, "Unexpected smi operands.");
   }
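
[Editor's note] The smi fast path above compares by subtracting: if the subtraction overflows, the wrapped difference has the wrong sign, and a bitwise NOT repairs it. A sketch on raw 32-bit values (__builtin_sub_overflow is a GCC/Clang builtin; the "never 0" remark holds because tagged smis are even, so an overflowed difference can never be -1):

#include <cstdint>

int32_t CompareBySubtraction(int32_t left, int32_t right) {
  int32_t diff;
  if (__builtin_sub_overflow(left, right, &diff)) {
    diff = ~diff;  // flips the sign; nonzero for even (smi-tagged) inputs
  }
  return diff;  // < 0, == 0, or > 0, as the stub returns in eax
}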
@@ -3880,7 +4149,7 @@
   // for NaN and undefined.
   {
     Label not_identical;
-    __ cmp(eax, Operand(edx));
+    __ cmp(eax, edx);
     __ j(not_equal, &not_identical);
 
     if (cc_ != equal) {
@@ -3929,7 +4198,7 @@
       __ Set(eax, Immediate(0));
       // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
       // bits.
-      __ add(edx, Operand(edx));
+      __ add(edx, edx);
       __ cmp(edx, kQuietNaNHighBitsMask << 1);
       if (cc_ == equal) {
         STATIC_ASSERT(EQUAL != 1);
@@ -3963,19 +4232,19 @@
     STATIC_ASSERT(kSmiTag == 0);
     ASSERT_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
-    __ and_(ecx, Operand(eax));
-    __ test(ecx, Operand(edx));
+    __ and_(ecx, eax);
+    __ test(ecx, edx);
     __ j(not_zero, &not_smis, Label::kNear);
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
     STATIC_ASSERT(kSmiTagMask == 1);
     // ecx still holds eax & kSmiTag, which is either zero or one.
-    __ sub(Operand(ecx), Immediate(0x01));
+    __ sub(ecx, Immediate(0x01));
     __ mov(ebx, edx);
-    __ xor_(ebx, Operand(eax));
-    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
-    __ xor_(ebx, Operand(eax));
+    __ xor_(ebx, eax);
+    __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
+    __ xor_(ebx, eax);
     // if eax was smi, ebx is now edx, else eax.
 
     // Check if the non-smi operand is a heap number.
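
[Editor's note] The sub/xor/and/xor run above is a branchless select: ecx becomes all-ones when eax is a smi and zero otherwise, and a ^ ((a ^ b) & mask) then yields b or a respectively, leaving ebx holding whichever operand is the non-smi. The same trick in C++ (a sketch under V8's smi-tag-is-zero assumption):

#include <cstdint>

uint32_t PickNonSmi(uint32_t a, uint32_t b) {
  // (a & 1) is 0 when a is a smi; subtracting 1 turns that into all-ones.
  uint32_t mask = (a & 1u) - 1u;
  return a ^ ((a ^ b) & mask);  // b when a is a smi, otherwise a
}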
@@ -4037,9 +4306,9 @@
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
-      __ cmov(above, eax, Operand(ecx));
+      __ cmov(above, eax, ecx);
       __ mov(ecx, Immediate(Smi::FromInt(-1)));
-      __ cmov(below, eax, Operand(ecx));
+      __ cmov(below, eax, ecx);
       __ ret(0);
     } else {
       FloatingPointHelper::CheckFloatOperands(
@@ -4198,43 +4467,99 @@
 }
 
 
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+  code->set_has_function_cache(RecordCallTarget());
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
+  // 1 ~ size of the test eax opcode.
+  Object* cell = Memory::Object_at(address + kPointerSize + 1);
+  // Low-level because clearing happens during GC.
+  reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value(
+      RawUninitializedSentinel(heap));
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
+  // 1 ~ size of the test eax opcode.
+  Object* cell = Memory::Object_at(address + kPointerSize + 1);
+  return JSGlobalPropertyCell::cast(cell)->value();
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  // edi : the function to call
+  Isolate* isolate = masm->isolate();
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
   // function stub.
   if (ReceiverMightBeImplicit()) {
-    Label call;
+    Label receiver_ok;
     // Get the receiver from the stack.
     // +1 ~ return address
     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
     // Call as function is indicated with the hole.
-    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
-    __ j(not_equal, &call, Label::kNear);
+    __ cmp(eax, isolate->factory()->the_hole_value());
+    __ j(not_equal, &receiver_ok, Label::kNear);
     // Patch the receiver on the stack with the global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
     __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
-    __ bind(&call);
+    __ bind(&receiver_ok);
   }
 
-  // Get the function to call from the stack.
-  // +2 ~ receiver, return address
-  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
   // Check that the function really is a JavaScript function.
   __ JumpIfSmi(edi, &non_function);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
   __ j(not_equal, &slow);
 
+  if (RecordCallTarget()) {
+    // Cache the called function in a global property cell in the
+    // instruction stream after the call.  Cache states are uninitialized,
+    // monomorphic (indicated by a JSFunction), and megamorphic.
+    Label initialize, call;
+    // Load the cache cell address into ebx and the cache state into ecx.
+    __ mov(ebx, Operand(esp, 0));  // Return address.
+    __ mov(ebx, Operand(ebx, 1));  // 1 ~ sizeof 'test eax' opcode in bytes.
+    __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+    // A monomorphic cache hit or an already megamorphic state: invoke the
+    // function without changing the state.
+    __ cmp(ecx, edi);
+    __ j(equal, &call, Label::kNear);
+    __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
+    __ j(equal, &call, Label::kNear);
+
+    // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+    // megamorphic.
+    __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
+    __ j(equal, &initialize, Label::kNear);
+    // MegamorphicSentinel is an immortal immovable object (undefined) so no
+    // write barrier is needed.
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+           Immediate(MegamorphicSentinel(isolate)));
+    __ jmp(&call, Label::kNear);
+
+    // An uninitialized cache is patched with the function.
+    __ bind(&initialize);
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+    // No need for a write barrier here - cells are rescanned.
+
+    __ bind(&call);
+  }
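
[Editor's note] The RecordCallTarget block above keeps a three-state cache in the property cell: uninitialized, monomorphic (a JSFunction), and megamorphic (the undefined sentinel). Its transitions, written out as a sketch (the enum and helper are illustrative, not the patch's representation):

enum class CallCacheState { kUninitialized, kMonomorphic, kMegamorphic };

CallCacheState NextState(CallCacheState state, bool same_function_as_cached) {
  switch (state) {
    case CallCacheState::kUninitialized:
      return CallCacheState::kMonomorphic;  // cell is patched with the function
    case CallCacheState::kMonomorphic:
      return same_function_as_cached ? CallCacheState::kMonomorphic
                                     : CallCacheState::kMegamorphic;
    case CallCacheState::kMegamorphic:
      return CallCacheState::kMegamorphic;  // terminal: never goes back
  }
  return CallCacheState::kMegamorphic;  // unreachable; keeps compilers quiet
}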
+
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
 
   if (ReceiverMightBeImplicit()) {
     Label call_as_function;
-    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ cmp(eax, isolate->factory()->the_hole_value());
     __ j(equal, &call_as_function);
     __ InvokeFunction(edi,
                       actual,
@@ -4251,6 +4576,16 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case.
+    __ mov(ebx, Operand(esp, 0));
+    __ mov(ebx, Operand(ebx, 1));
+    // MegamorphicSentinel is an immortal immovable object (undefined) so no
+    // write barrier is needed.
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+           Immediate(MegamorphicSentinel(isolate)));
+  }
   // Check for function proxy.
   __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
   __ j(not_equal, &non_function);
@@ -4262,8 +4597,7 @@
   __ SetCallKind(ecx, CALL_AS_FUNCTION);
   __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
   {
-    Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
     __ jmp(adaptor, RelocInfo::CODE_TARGET);
   }
 
@@ -4275,8 +4609,7 @@
   __ Set(ebx, Immediate(0));
   __ SetCallKind(ecx, CALL_AS_METHOD);
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-  Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
   __ jmp(adaptor, RelocInfo::CODE_TARGET);
 }
 
@@ -4286,6 +4619,35 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  // It is important that the store buffer overflow stubs are generated first.
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(eax);
 }
@@ -4332,7 +4694,7 @@
   __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
   __ mov(Operand(esp, 2 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ call(Operand(ebx));
+  __ call(ebx);
   // Result is in eax or edx:eax - do not destroy these registers!
 
   if (always_allocate_scope) {
@@ -4364,8 +4726,7 @@
   // should have returned some failure value.
   if (FLAG_debug_code) {
     __ push(edx);
-    __ mov(edx, Operand::StaticVariable(
-        ExternalReference::the_hole_value_location(masm->isolate())));
+    __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
     Label okay;
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
@@ -4376,7 +4737,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles_);
+  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
   __ ret(0);
 
   // Handling of failure.
@@ -4393,10 +4754,8 @@
   __ j(equal, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  ExternalReference the_hole_location =
-      ExternalReference::the_hole_value_location(masm->isolate());
   __ mov(eax, Operand::StaticVariable(pending_exception_address));
-  __ mov(edx, Operand::StaticVariable(the_hole_location));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(Operand::StaticVariable(pending_exception_address), edx);
 
   // Special handling of termination exceptions which are uncatchable
@@ -4431,7 +4790,7 @@
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(save_doubles_);
+  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -4482,12 +4841,12 @@
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
   // Set up the frame.
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // Push marker in two places.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -4515,38 +4874,38 @@
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   __ bind(&cont);
 
-  // Call a faked try-block that does the invoke.
-  __ call(&invoke);
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
   __ mov(Operand::StaticVariable(pending_exception), eax);
   __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
 
   // Clear any pending exceptions.
-  ExternalReference the_hole_location =
-      ExternalReference::the_hole_value_location(masm->isolate());
-  __ mov(edx, Operand::StaticVariable(the_hole_location));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(Operand::StaticVariable(pending_exception), edx);
 
   // Fake a receiver (NULL).
   __ push(Immediate(0));  // receiver
 
-  // Invoke the function by calling through JS entry trampoline
-  // builtin and pop the faked function when we return. Notice that we
-  // cannot store a reference to the trampoline code directly in this
-  // stub, because the builtin stubs may not have been generated yet.
+  // Invoke the function by calling through JS entry trampoline builtin and
+  // pop the faked function when we return. Notice that we cannot store a
+  // reference to the trampoline code directly in this stub, because the
+  // builtin stubs may not have been generated yet.
   if (is_construct) {
-    ExternalReference construct_entry(
-        Builtins::kJSConstructEntryTrampoline,
-        masm->isolate());
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      masm->isolate());
     __ mov(edx, Immediate(construct_entry));
   } else {
     ExternalReference entry(Builtins::kJSEntryTrampoline,
@@ -4555,7 +4914,7 @@
   }
   __ mov(edx, Operand(edx, 0));  // deref address
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-  __ call(Operand(edx));
+  __ call(edx);
 
   // Unlink this frame from the handler chain.
   __ PopTryHandler();
@@ -4563,8 +4922,7 @@
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(ebx);
-  __ cmp(Operand(ebx),
-         Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ j(not_equal, &not_outermost_js_2);
   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ bind(&not_outermost_js_2);
@@ -4578,7 +4936,7 @@
   __ pop(ebx);
   __ pop(esi);
   __ pop(edi);
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+  __ add(esp, Immediate(2 * kPointerSize));  // remove markers
 
   // Restore frame pointer and return.
   __ pop(ebp);
@@ -4617,12 +4975,12 @@
   static const int kDeltaToCmpImmediate = 2;
   static const int kDeltaToMov = 8;
   static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
-  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
+  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
+  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
   static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
 
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(masm->isolate());
 
   ASSERT_EQ(object.code(), InstanceofStub::left().code());
   ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -4644,22 +5002,23 @@
     // Look up the function and the map in the instanceof cache.
     Label miss;
     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-    __ cmp(function,
-           Operand::StaticArray(scratch, times_pointer_size, roots_address));
+    __ cmp(function, Operand::StaticArray(scratch,
+                                          times_pointer_size,
+                                          roots_array_start));
     __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
     __ cmp(map, Operand::StaticArray(
-        scratch, times_pointer_size, roots_address));
+        scratch, times_pointer_size, roots_array_start));
     __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(eax, Operand::StaticArray(
-        scratch, times_pointer_size, roots_address));
+        scratch, times_pointer_size, roots_array_start));
     __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
     __ bind(&miss);
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -4669,9 +5028,10 @@
   // map and function. The cached answer will be set when it is known below.
   if (!HasCallSiteInlineCheck()) {
   __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+         map);
   __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
          function);
   } else {
     // The constants for the code patching are based on no push instructions
@@ -4681,12 +5041,13 @@
     __ mov(scratch, Operand(esp, 0 * kPointerSize));
     __ sub(scratch, Operand(esp, 1 * kPointerSize));
     if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
+      __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
-      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
+      __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
     }
-    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
+    __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
+    __ mov(Operand(scratch, 0), map);
   }
 
   // Loop through the prototype chain of the object looking for the function
@@ -4694,10 +5055,10 @@
   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
   Label loop, is_instance, is_not_instance;
   __ bind(&loop);
-  __ cmp(scratch, Operand(prototype));
+  __ cmp(scratch, prototype);
   __ j(equal, &is_instance, Label::kNear);
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(scratch), Immediate(factory->null_value()));
+  __ cmp(scratch, Immediate(factory->null_value()));
   __ j(equal, &is_not_instance, Label::kNear);
   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
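
The loop above walks the object's prototype chain until it hits the function's
prototype (an instance) or null (not an instance). A minimal C++ sketch of the
same walk, assuming a stand-in Obj type that collapses the map -> prototype
hop into a single pointer:

    struct Obj { Obj* proto; };  // stand-in; the stub goes object -> map -> prototype

    // Returns true at &is_instance, false at &is_not_instance in the stub above.
    bool IsInstance(Obj* start, Obj* prototype) {
      for (Obj* p = start; p != nullptr; p = p->proto) {
        if (p == prototype) return true;
      }
      return false;  // reached null_value()
    }
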
@@ -4708,7 +5069,7 @@
     __ Set(eax, Immediate(0));
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(Operand::StaticArray(scratch,
-                                times_pointer_size, roots_address), eax);
+                                times_pointer_size, roots_array_start), eax);
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->true_value());
@@ -4730,7 +5091,7 @@
     __ Set(eax, Immediate(Smi::FromInt(1)));
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(Operand::StaticArray(
-        scratch, times_pointer_size, roots_address), eax);
+        scratch, times_pointer_size, roots_array_start), eax);
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->false_value());
@@ -4788,13 +5149,14 @@
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
     // Call the builtin and convert 0/1 to true/false.
-    __ EnterInternalFrame();
-    __ push(object);
-    __ push(function);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(object);
+      __ push(function);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     Label true_value, done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(zero, &true_value, Label::kNear);
     __ mov(eax, factory->false_value());
     __ jmp(&done, Label::kNear);
@@ -4854,11 +5216,6 @@
 // StringCharCodeAtGenerator
 
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-  Label sliced_string;
-
   // If the receiver is a smi trigger the non-string case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -4873,85 +5230,26 @@
   // If the index is non-smi trigger the non-smi case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
-  __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result_, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  __ SmiUntag(index_);
 
-  // Handle non-flat strings.
-  __ and_(result_, kStringRepresentationMask);
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(result_, kExternalStringTag);
-  __ j(greater, &sliced_string, Label::kNear);
-  __ j(equal, &call_runtime_);
+  Factory* factory = masm->isolate()->factory();
+  StringCharLoadGenerator::Generate(
+      masm, factory, object_, index_, result_, &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
-         Immediate(masm->isolate()->factory()->empty_string()));
-  __ j(not_equal, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string, Label::kNear);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
-  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
-  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result_, Immediate(kStringRepresentationMask));
-  __ j(not_zero, &call_runtime_);
-  __ jmp(&flat_string, Label::kNear);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzx_w(result_, FieldOperand(object_,
-                                   scratch_, times_1,  // Scratch is smi-tagged.
-                                   SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ SmiUntag(scratch_);
-  __ movzx_b(result_, FieldOperand(object_,
-                                   scratch_, times_1,
-                                   SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
   __ SmiTag(result_);
   __ bind(&exit_);
 }
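
The rewritten fast path untags the smi index and hands all string-shape
dispatch to StringCharLoadGenerator; the deleted branches show what that
generator has to cover. For the flat-string case only (cons, sliced and
external strings fall back to the runtime), a hedged C++ equivalent of the
final character load is:

    #include <cstdint>

    // Sketch only: `chars` points at the first character, mirroring the stub's
    // FieldOperand(object, Seq*String::kHeaderSize) addressing.
    uint16_t LoadFlatChar(const void* chars, bool is_ascii, int32_t index) {
      if (is_ascii) {
        return static_cast<const uint8_t*>(chars)[index];   // movzx_b: 1 byte/char
      }
      return static_cast<const uint16_t*>(chars)[index];    // movzx_w: 2 bytes/char
    }
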
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
@@ -4963,7 +5261,6 @@
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
-  __ push(index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4972,12 +5269,11 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!scratch_.is(eax)) {
+  if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
-    __ mov(scratch_, eax);
+    __ mov(index_, eax);
   }
-  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -4985,7 +5281,7 @@
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
   STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4995,6 +5291,7 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ SmiTag(index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(eax)) {
@@ -5036,7 +5333,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5063,7 +5361,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -5110,7 +5409,7 @@
   Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in eax.
   Counters* counters = masm->isolate()->counters();
@@ -5119,7 +5418,7 @@
   __ bind(&second_not_zero_length);
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in edx.
   __ mov(eax, edx);
@@ -5134,13 +5433,13 @@
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
-  __ add(ebx, Operand(ecx));
+  __ add(ebx, ecx);
   STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
   // Handle exceptionally long strings in the runtime system.
   __ j(overflow, &string_add_runtime);
   // Use the symbol table when adding two one character strings, as it
   // helps later optimizations to return a symbol here.
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+  __ cmp(ebx, Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
   // Check that both strings are non-external ascii strings.
@@ -5177,7 +5476,7 @@
                          &string_add_runtime);
   // Pack both characters in ebx.
   __ shl(ecx, kBitsPerByte);
-  __ or_(ebx, Operand(ecx));
+  __ or_(ebx, ecx);
   // Set the characters in the new string.
   __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -5185,7 +5484,7 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
   __ j(below, &string_add_flat_result);
 
   // If result is not supposed to be flat allocate a cons string object. If both
@@ -5195,7 +5494,7 @@
   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
-  __ and_(ecx, Operand(edi));
+  __ and_(ecx, edi);
   STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(ecx, Immediate(kStringEncodingMask));
@@ -5223,7 +5522,7 @@
   __ j(not_zero, &ascii_data);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ xor_(edi, Operand(ecx));
+  __ xor_(edi, ecx);
   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
@@ -5271,12 +5570,12 @@
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load first argument and locate first character.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
@@ -5286,7 +5585,7 @@
   __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5310,13 +5609,13 @@
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(Operand(ecx),
+  __ add(ecx,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load first argument and locate first character.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx),
+  __ add(edx,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
@@ -5327,7 +5626,7 @@
   __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5403,15 +5702,15 @@
   if (ascii) {
     __ mov_b(scratch, Operand(src, 0));
     __ mov_b(Operand(dest, 0), scratch);
-    __ add(Operand(src), Immediate(1));
-    __ add(Operand(dest), Immediate(1));
+    __ add(src, Immediate(1));
+    __ add(dest, Immediate(1));
   } else {
     __ mov_w(scratch, Operand(src, 0));
     __ mov_w(Operand(dest, 0), scratch);
-    __ add(Operand(src), Immediate(2));
-    __ add(Operand(dest), Immediate(2));
+    __ add(src, Immediate(2));
+    __ add(dest, Immediate(2));
   }
-  __ sub(Operand(count), Immediate(1));
+  __ sub(count, Immediate(1));
   __ j(not_zero, &loop);
 }
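
Stripped of the register bookkeeping, the character-copy loop above is an
element-wise copy in which ascii strings move one byte per character and
two-byte strings move two. A hedged C++ equivalent (the name is illustrative,
not V8's helper):

    #include <cstring>

    // Illustrative stand-in for the generated copy loop; `count` is characters.
    void CopyCharsSketch(void* dest, const void* src, int count, bool ascii) {
      std::memcpy(dest, src, static_cast<std::size_t>(count) * (ascii ? 1 : 2));
    }
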
 
@@ -5434,7 +5733,7 @@
 
   // Nothing to do for zero characters.
   Label done;
-  __ test(count, Operand(count));
+  __ test(count, count);
   __ j(zero, &done);
 
   // Make count the number of bytes to copy.
@@ -5459,7 +5758,7 @@
 
   // Check if there are more bytes to copy.
   __ bind(&last_bytes);
-  __ test(count, Operand(count));
+  __ test(count, count);
   __ j(zero, &done);
 
   // Copy remaining characters.
@@ -5467,9 +5766,9 @@
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
-  __ add(Operand(src), Immediate(1));
-  __ add(Operand(dest), Immediate(1));
-  __ sub(Operand(count), Immediate(1));
+  __ add(src, Immediate(1));
+  __ add(dest, Immediate(1));
+  __ sub(count, Immediate(1));
   __ j(not_zero, &loop);
 
   __ bind(&done);
@@ -5491,12 +5790,12 @@
   // different hash algorithm. Don't try to look for these in the symbol table.
   Label not_array_index;
   __ mov(scratch, c1);
-  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
-  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ sub(scratch, Immediate(static_cast<int>('0')));
+  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(above, &not_array_index, Label::kNear);
   __ mov(scratch, c2);
-  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
-  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ sub(scratch, Immediate(static_cast<int>('0')));
+  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(below_equal, not_probed);
 
   __ bind(&not_array_index);
@@ -5509,24 +5808,24 @@
   // Collect the two characters in a register.
   Register chars = c1;
   __ shl(c2, kBitsPerByte);
-  __ or_(chars, Operand(c2));
+  __ or_(chars, c2);
 
   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
   // hash:  hash of two character string.
 
   // Load the symbol table.
   Register symbol_table = c2;
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(masm->isolate());
   __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
   __ mov(symbol_table,
-         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+         Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
 
   // Calculate capacity mask from the symbol table capacity.
   Register mask = scratch2;
   __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
   __ SmiUntag(mask);
-  __ sub(Operand(mask), Immediate(1));
+  __ sub(mask, Immediate(1));
 
   // Registers
   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
@@ -5544,9 +5843,9 @@
     // Calculate entry in symbol table.
     __ mov(scratch, hash);
     if (i > 0) {
-      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+      __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
     }
-    __ and_(scratch, Operand(mask));
+    __ and_(scratch, mask);
 
     // Load the entry from the symbol table.
     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
@@ -5560,7 +5859,7 @@
     Factory* factory = masm->isolate()->factory();
     __ cmp(candidate, factory->undefined_value());
     __ j(equal, not_found);
-    __ cmp(candidate, factory->null_value());
+    __ cmp(candidate, factory->the_hole_value());
     __ j(equal, &next_probe[i]);
 
     // If length is not 2 the string is not a candidate.
@@ -5582,7 +5881,7 @@
     // Check if the two characters match.
     __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
     __ and_(temp, 0x0000ffff);
-    __ cmp(chars, Operand(temp));
+    __ cmp(chars, temp);
     __ j(equal, &found_in_symbol_table);
     __ bind(&next_probe_pop_mask[i]);
     __ pop(mask);
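
The unrolled probes above implement open addressing over a power-of-two symbol
table: undefined ends the search, the hole (a deleted entry) keeps probing, and
a two-character candidate matches by comparing both characters at once. A
hedged, self-contained C++ sketch; the quadratic probe offset (n + n*n)/2 is an
assumption modeled on V8's hash tables, and 20 mirrors kTotalProbes from the
header below:

    #include <cstdint>

    struct Candidate { bool undefined, hole; uint32_t length; uint16_t chars; };

    static uint32_t GetProbeOffset(uint32_t n) { return (n + n * n) >> 1; }  // assumed

    bool LookupTwoCharSymbol(const Candidate* table, uint32_t capacity,
                             uint32_t hash, uint16_t chars, uint32_t* entry_out) {
      const uint32_t mask = capacity - 1;  // capacity is 2^n
      for (uint32_t i = 0; i < 20; i++) {
        uint32_t entry = (hash + GetProbeOffset(i)) & mask;
        const Candidate& c = table[entry];
        if (c.undefined) return false;            // definitely absent
        if (c.hole) continue;                     // deleted slot, next probe
        if (c.length == 2 && c.chars == chars) {  // both chars in one compare
          *entry_out = entry;
          return true;
        }
      }
      return false;  // give up and go to the runtime, as the stub does
    }
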
@@ -5606,29 +5905,14 @@
                                     Register hash,
                                     Register character,
                                     Register scratch) {
-  // hash = (seed + character) + ((seed + character) << 10);
-  if (Serializer::enabled()) {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(masm->isolate());
-    __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
-    __ mov(scratch, Operand::StaticArray(scratch,
-                                         times_pointer_size,
-                                         roots_address));
-    __ SmiUntag(scratch);
-    __ add(scratch, Operand(character));
-    __ mov(hash, scratch);
-    __ shl(scratch, 10);
-    __ add(hash, Operand(scratch));
-  } else {
-    int32_t seed = masm->isolate()->heap()->HashSeed();
-    __ lea(scratch, Operand(character, seed));
-    __ shl(scratch, 10);
-    __ lea(hash, Operand(scratch, character, times_1, seed));
-  }
+  // hash = character + (character << 10);
+  __ mov(hash, character);
+  __ shl(hash, 10);
+  __ add(hash, character);
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ shr(scratch, 6);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
 }
 
 
@@ -5637,15 +5921,15 @@
                                             Register character,
                                             Register scratch) {
   // hash += character;
-  __ add(hash, Operand(character));
+  __ add(hash, character);
   // hash += hash << 10;
   __ mov(scratch, hash);
   __ shl(scratch, 10);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ shr(scratch, 6);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
 }
 
 
@@ -5655,22 +5939,24 @@
   // hash += hash << 3;
   __ mov(scratch, hash);
   __ shl(scratch, 3);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
   // hash ^= hash >> 11;
   __ mov(scratch, hash);
   __ shr(scratch, 11);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
   // hash += hash << 15;
   __ mov(scratch, hash);
   __ shl(scratch, 15);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
 
-  __ and_(hash, String::kHashBitMask);
+  uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+  __ and_(hash, kHashShiftCutOffMask);
 
   // if (hash == 0) hash = 27;
   Label hash_not_zero;
+  __ test(hash, hash);
   __ j(not_zero, &hash_not_zero, Label::kNear);
-  __ mov(hash, Immediate(StringHasher::kZeroHash));
+  __ mov(hash, Immediate(27));
   __ bind(&hash_not_zero);
 }
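
Taken together, the three hash generators above compute a Jenkins-style
one-at-a-time hash (note that this merge drops the seeded variant that
previously ran under Serializer::enabled()). A hedged C++ restatement;
String::kHashShift's value is not visible in this hunk, so the constant below
is an assumption, while the zero-hash fallback of 27 comes straight from the
diff:

    #include <cstdint>

    static const int kHashShift = 2;  // assumed value, for illustration only

    uint32_t HashInit(uint32_t c) {                      // GenerateHashInit
      uint32_t h = c + (c << 10);
      return h ^ (h >> 6);
    }

    uint32_t HashAddCharacter(uint32_t h, uint32_t c) {  // GenerateHashAddCharacter
      h += c;
      h += h << 10;
      return h ^ (h >> 6);
    }

    uint32_t HashGetHash(uint32_t h) {                   // GenerateHashGetHash
      h += h << 3;
      h ^= h >> 11;
      h += h << 15;
      h &= (1u << (32 - kHashShift)) - 1;                // kHashShiftCutOffMask
      return h == 0 ? 27 : h;                            // "if (hash == 0) hash = 27;"
    }
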
 
@@ -5700,7 +5986,7 @@
   __ JumpIfNotSmi(ecx, &runtime);
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
   __ JumpIfNotSmi(edx, &runtime);
-  __ sub(ecx, Operand(edx));
+  __ sub(ecx, edx);
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
   Label return_eax;
   __ j(equal, &return_eax);
@@ -5751,18 +6037,15 @@
     // ebx: instance type
     // ecx: sub string length
     // edx: from index (smi)
-    Label allocate_slice, sliced_string, seq_string;
+    Label allocate_slice, sliced_string, seq_or_external_string;
     __ cmp(ecx, SlicedString::kMinLength);
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ test(ebx, Immediate(kStringRepresentationMask));
-    __ j(zero, &seq_string, Label::kNear);
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ test(ebx, Immediate(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ j(zero, &runtime);
+    __ j(zero, &seq_or_external_string, Label::kNear);
 
     Factory* factory = masm->isolate()->factory();
     __ test(ebx, Immediate(kSlicedNotConsMask));
@@ -5780,8 +6063,8 @@
     __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
     __ jmp(&allocate_slice, Label::kNear);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string.  Just move string to the correct register.
     __ mov(edi, eax);
 
     __ bind(&allocate_slice);
@@ -5832,13 +6115,13 @@
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   __ SmiUntag(ebx);
-  __ add(esi, Operand(ebx));
+  __ add(esi, ebx);
 
   // eax: result string
   // ecx: result length
@@ -5867,18 +6150,17 @@
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(Operand(edi),
+  __ add(edi,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi),
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   // Since "from" is a smi, it is already twice the index, which matches the
   // size of a two-byte character.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(esi, Operand(ebx));
+  __ add(esi, ebx);
 
   // eax: result string
   // ecx: result length
@@ -5918,7 +6200,7 @@
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(length, Operand(length));
+  __ test(length, length);
   __ j(not_zero, &compare_chars, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
@@ -5953,14 +6235,14 @@
 
   __ j(less_equal, &left_shorter, Label::kNear);
   // Right string is shorter. Change scratch1 to be length of right string.
-  __ sub(scratch1, Operand(length_delta));
+  __ sub(scratch1, length_delta);
   __ bind(&left_shorter);
 
   Register min_length = scratch1;
 
   // If either length is zero, just compare lengths.
   Label compare_lengths;
-  __ test(min_length, Operand(min_length));
+  __ test(min_length, min_length);
   __ j(zero, &compare_lengths, Label::kNear);
 
   // Compare characters.
@@ -5970,7 +6252,7 @@
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
-  __ test(length_delta, Operand(length_delta));
+  __ test(length_delta, length_delta);
   __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
@@ -6019,7 +6301,7 @@
   __ mov_b(scratch, Operand(left, index, times_1, 0));
   __ cmpb(scratch, Operand(right, index, times_1, 0));
   __ j(not_equal, chars_not_equal, chars_not_equal_near);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
   __ j(not_zero, &loop);
 }
 
@@ -6036,7 +6318,7 @@
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
   Label not_same;
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6052,7 +6334,7 @@
   // Compare flat ascii strings.
   // Drop arguments from the stack.
   __ pop(ecx);
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  __ add(esp, Immediate(2 * kPointerSize));
   __ push(ecx);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
@@ -6066,16 +6348,16 @@
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ or_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ or_(ecx, eax);
   __ JumpIfNotSmi(ecx, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ sub(eax, Operand(edx));
+    __ sub(eax, edx);
   } else {
     Label done;
-    __ sub(edx, Operand(eax));
+    __ sub(edx, eax);
     __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(edx);
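
GenerateSmis orders two smis with a single subtraction: if edx - eax overflows,
only the sign of the 32-bit result is wrong, and a one's-complement (not)
repairs it, since callers of the compare IC only inspect the sign. A hedged C++
model of the part of the trick visible in this hunk:

    #include <cstdint>

    // left and right are tagged smis; the sign of the result orders them.
    int32_t CompareSmis(int32_t left, int32_t right) {
      int64_t wide = static_cast<int64_t>(left) - right;
      int32_t narrow = static_cast<int32_t>(wide);
      if (wide != narrow) narrow = ~narrow;  // overflow: flip the sign, like not_()
      return narrow;
    }
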
@@ -6095,8 +6377,8 @@
   Label generic_stub;
   Label unordered;
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ and_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
 
   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
@@ -6124,9 +6406,9 @@
     // Performing mov, because xor would destroy the flag register.
     __ mov(eax, 0);  // equal
     __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, Operand(ecx));
+    __ cmov(above, eax, ecx);
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, Operand(ecx));
+    __ cmov(below, eax, ecx);
     __ ret(0);
 
     __ bind(&unordered);
@@ -6153,9 +6435,9 @@
 
   // Check that both operands are heap objects.
   Label miss;
-  __ mov(tmp1, Operand(left));
+  __ mov(tmp1, left);
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, Operand(right));
+  __ and_(tmp1, right);
   __ JumpIfSmi(tmp1, &miss, Label::kNear);
 
   // Check that both operands are symbols.
@@ -6164,13 +6446,13 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, Operand(tmp2));
+  __ and_(tmp1, tmp2);
   __ test(tmp1, Immediate(kIsSymbolMask));
   __ j(zero, &miss, Label::kNear);
 
   // Symbols are compared by identity.
   Label done;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
   ASSERT(right.is(eax));
@@ -6199,9 +6481,9 @@
   Register tmp3 = edi;
 
   // Check that both operands are heap objects.
-  __ mov(tmp1, Operand(left));
+  __ mov(tmp1, left);
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, Operand(right));
+  __ and_(tmp1, right);
   __ JumpIfSmi(tmp1, &miss);
 
   // Check that both operands are strings. This leaves the instance
@@ -6212,13 +6494,13 @@
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   __ mov(tmp3, tmp1);
   STATIC_ASSERT(kNotStringTag != 0);
-  __ or_(tmp3, Operand(tmp2));
+  __ or_(tmp3, tmp2);
   __ test(tmp3, Immediate(kIsNotStringMask));
   __ j(not_zero, &miss);
 
   // Fast check for identical strings.
   Label not_same;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6232,7 +6514,7 @@
   // because we already know they are not identical.
   Label do_compare;
   STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, Operand(tmp2));
+  __ and_(tmp1, tmp2);
   __ test(tmp1, Immediate(kIsSymbolMask));
   __ j(zero, &do_compare, Label::kNear);
   // Make sure eax is non-zero. At this point input operands are
@@ -6265,8 +6547,8 @@
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ and_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &miss, Label::kNear);
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6275,7 +6557,7 @@
   __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
   __ ret(0);
 
   __ bind(&miss);
@@ -6290,15 +6572,16 @@
   __ push(eax);
   __ push(ecx);
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                             masm->isolate());
-  __ EnterInternalFrame();
-  __ push(edx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                               masm->isolate());
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(edx);
+    __ push(eax);
+    __ push(Immediate(Smi::FromInt(op_)));
+    __ CallExternalReference(miss, 3);
+  }
 
   // Compute the entry point of the rewritten stub.
   __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -6310,7 +6593,7 @@
   __ push(ecx);
 
   // Do a tail call to the rewritten stub.
-  __ jmp(Operand(edi));
+  __ jmp(edi);
 }
 
 
@@ -6319,13 +6602,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register properties,
-    String* name,
-    Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register r0) {
   ASSERT(name->IsSymbol());
 
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
@@ -6339,8 +6621,8 @@
     // Capacity is smi 2^n.
     __ mov(index, FieldOperand(properties, kCapacityOffset));
     __ dec(index);
-    __ and_(Operand(index),
-           Immediate(Smi::FromInt(name->Hash() +
+    __ and_(index,
+            Immediate(Smi::FromInt(name->Hash() +
                                    StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
@@ -6371,12 +6653,10 @@
                                   StringDictionaryLookupStub::NEGATIVE_LOOKUP);
   __ push(Immediate(Handle<Object>(name)));
   __ push(Immediate(name->Hash()));
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
-  __ test(r0, Operand(r0));
+  __ CallStub(&stub);
+  __ test(r0, r0);
   __ j(not_zero, miss);
   __ jmp(done);
-  return result;
 }
 
 
@@ -6391,6 +6671,11 @@
                                                         Register name,
                                                         Register r0,
                                                         Register r1) {
+  ASSERT(!elements.is(r0));
+  ASSERT(!elements.is(r1));
+  ASSERT(!name.is(r0));
+  ASSERT(!name.is(r1));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6406,9 +6691,9 @@
     __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
     __ shr(r0, String::kHashShift);
     if (i > 0) {
-      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
     }
-    __ and_(r0, Operand(r1));
+    __ and_(r0, r1);
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
@@ -6432,13 +6717,15 @@
   __ push(r0);
   __ CallStub(&stub);
 
-  __ test(r1, Operand(r1));
+  __ test(r1, r1);
   __ j(zero, miss);
   __ jmp(done);
 }
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -6469,8 +6756,7 @@
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(scratch, Operand(esp, 2 * kPointerSize));
     if (i > 0) {
-      __ add(Operand(scratch),
-             Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
     }
     __ and_(scratch, Operand(esp, 0));
 
@@ -6526,6 +6812,364 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { ebx, eax, edi, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+  { ebx, edi, edx, OMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal and CallFunctionStub.
+  { ebx, ecx, edx, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField and
+  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { edx, ecx, ebx, EMIT_REMEMBERED_SET },
+  // GenerateStoreField calls the stub with two different permutations of
+  // registers.  This is the second.
+  { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+  // StoreIC::GenerateNormal via GenerateDictionaryStore
+  { ebx, edi, edx, EMIT_REMEMBERED_SET },
+  // KeyedStoreIC::GenerateGeneric.
+  { ebx, edx, ecx, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { edi, edx, ecx, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { edx, ebx, edi, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { eax, edx, esi, EMIT_REMEMBERED_SET },
+  { edx, eax, edi, EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { ebx, eax, ecx, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET }
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+
+  CpuFeatures::TryForceFeatureScope scope(SSE2);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    StoreBufferOverflowStub stub2(kSaveFPRegs);
+    stub2.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+  __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  masm->set_byte_at(0, kTwoByteNopInstruction);
+  masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     not_zero,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm,
+        kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+        mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm,
+      kReturnOnNoNeedToInformIncrementalMarker,
+      mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0());  // Value.
+  }
+  __ mov(Operand(esp, 2 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label object_is_black, need_incremental, need_incremental_pop_object;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(),
+                 regs_.scratch0(),
+                 regs_.scratch1(),
+                 &object_is_black,
+                 Label::kNear);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&object_is_black);
+
+  // Get the value from the slot.
+  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     not_zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ jmp(&need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need an extra register for this, so we push the object register
+  // temporarily.
+  __ push(regs_.object());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    &need_incremental_pop_object,
+                    Label::kNear);
+  __ pop(regs_.object());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&need_incremental_pop_object);
+  __ pop(regs_.object());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : element value to store
+  //  -- ebx    : array literal
+  //  -- edi    : map of array literal
+  //  -- ecx    : element index as smi
+  //  -- edx    : array literal index in function
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label slow_elements_from_double;
+  Label fast_elements;
+
+  __ CheckFastElements(edi, &double_elements);
+
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(eax, &smi_element);
+  __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
+
+  // Storing into the array literal requires an elements transition. Call into
+  // the runtime.
+
+  __ bind(&slow_elements);
+  __ pop(edi);  // Pop return address and remember to put it back later for the
+                // tail call.
+  __ push(ebx);
+  __ push(ecx);
+  __ push(eax);
+  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+  __ push(edx);
+  __ push(edi);  // Push return address back so that the tail call returns to
+                 // the right place.
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  __ bind(&slow_elements_from_double);
+  __ pop(edx);
+  __ jmp(&slow_elements);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
+                           FixedArrayBase::kHeaderSize));
+  __ mov(Operand(ecx, 0), eax);
+  // Update the write barrier for the array store.
+  __ RecordWrite(ebx, ecx, eax,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
+                      FixedArrayBase::kHeaderSize), eax);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+
+  __ push(edx);
+  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(eax,
+                                 edx,
+                                 ecx,
+                                 edi,
+                                 xmm0,
+                                 &slow_elements_from_double,
+                                 false);
+  __ pop(edx);
+  __ ret(0);
+}
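
The stub above dispatches on the array literal's ElementsKind: double arrays
take the number-store path, smi values can always be stored raw (they need no
barrier), object values into FAST_ELEMENTS get a store plus write barrier, and
anything else forces an elements transition in the runtime. A hedged summary of
that dispatch, with puts() standing in for the real store helpers:

    #include <cstdio>

    enum Kind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    // Sketch of the control flow only; the comments name the stub's labels.
    void StoreLiteralElement(Kind kind, bool value_is_smi) {
      if (kind == FAST_DOUBLE_ELEMENTS) {           // &double_elements
        std::puts("StoreNumberToDoubleElements; runtime on failure");
      } else if (value_is_smi) {                    // &smi_element
        std::puts("plain store, no write barrier");
      } else if (kind == FAST_ELEMENTS) {           // &fast_elements
        std::puts("store + RecordWrite barrier");
      } else {                                      // &slow_elements
        std::puts("transition via Runtime::kStoreArrayLiteralElement");
      }
    }
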
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index fa255da..4d23c3a 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,6 +60,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -128,7 +147,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -215,7 +234,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -402,13 +421,12 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register properties,
-      String* name,
-      Register r0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register r0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -418,6 +436,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -430,7 +450,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -451,6 +471,265 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
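
The GetMode/Patch pair above is a small self-describing protocol: the stub's
first bytes are either nops or short jumps, and the combination encodes which
GC mode the stub currently serves. A minimal sketch of the decode side over a
plain byte buffer (Code, ASSERT and the ICache flush are replaced by standard
C++ stand-ins):

    #include <cassert>
    #include <cstdint>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    const uint8_t kTwoByteNop   = 0x3c;  // cmpb al, #imm8
    const uint8_t kTwoByteJump  = 0xeb;  // jmp #imm8
    const uint8_t kFiveByteNop  = 0x3d;  // cmpl eax, #imm32
    const uint8_t kFiveByteJump = 0xe9;  // jmp #imm32

    // Decode the mode from the first and third code bytes, following the
    // same branch structure as RecordWriteStub::GetMode above.
    Mode GetMode(const uint8_t* code) {
      if (code[0] == kTwoByteJump) return INCREMENTAL;
      assert(code[0] == kTwoByteNop);
      if (code[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
      assert(code[2] == kFiveByteNop);
      return STORE_BUFFER_ONLY;
    }

A fresh stub starts as two nops (STORE_BUFFER_ONLY); Patch() rewrites at most
two bytes and then flushes 7 bytes of instruction cache, enough to cover both
patched instructions.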
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers, where the third
+  // is always ecx (needed for shift operations).  The input is two registers
+  // that must be preserved and one scratch register provided by the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
+      if (scratch0.is(ecx)) {
+        scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(ecx)) {
+        object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(ecx)) {
+        address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register.  But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!ecx.is(scratch0_orig_) &&
+          !ecx.is(object_orig_) &&
+          !ecx.is(address_orig_)) {
+        masm->push(ecx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->mov(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->mov(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to move
+      // them back.  Only in one case is the orig_ reg different from the plain
+      // one, since only one of them can alias with ecx.
+      if (!object_.is(object_orig_)) {
+        masm->mov(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->mov(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!ecx.is(scratch0_orig_) &&
+          !ecx.is(object_orig_) &&
+          !ecx.is(address_orig_)) {
+        masm->pop(ecx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The caller saved
+    // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
+      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(SSE2);
+        masm->sub(esp,
+                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+        // Save all XMM registers except XMM0.
+        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+          XMMRegister reg = XMMRegister::from_code(i);
+          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+        }
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(SSE2);
+        // Restore all XMM registers except XMM0.
+        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+          XMMRegister reg = XMMRegister::from_code(i);
+          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+        }
+        masm->add(esp,
+                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+      }
+      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
+      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always ecx.
+
+    Register GetRegThatIsNotEcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(ecx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
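
When one of the inputs aliases ecx, the constructor above remaps it by
scanning the allocatable set for a register distinct from ecx and the other
operands. The selection loop as a standalone sketch (the Register type and
the allocatable list are simplified stand-ins):

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy register: compared by name, like v8's Register::is().
    struct Register {
      std::string name;
      bool is(const Register& other) const { return name == other.name; }
    };

    // First allocatable register distinct from ecx and the three
    // arguments; mirrors GetRegThatIsNotEcxOr above.
    Register GetRegThatIsNotEcxOr(const Register& r1,
                                  const Register& r2,
                                  const Register& r3) {
      const std::vector<Register> allocatable = {
          {"eax"}, {"ecx"}, {"edx"}, {"ebx"}, {"esi"}, {"edi"}};
      for (const Register& c : allocatable) {
        if (c.is({"ecx"}) || c.is(r1) || c.is(r2) || c.is(r3)) continue;
        return c;
      }
      assert(false && "UNREACHABLE");
      return Register{""};
    }

Six allocatable registers minus ecx and three excluded operands always leave
at least one candidate, so the UNREACHABLE arm never fires.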
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 3> {};
+  class ValueBits: public BitField<int, 3, 3> {};
+  class AddressBits: public BitField<int, 6, 3> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  RegisterAllocation regs_;
+};
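
MinorKey() packs the stub's whole configuration into one integer, which is
what lets identical RecordWrite stubs be shared and pregenerated. A minimal
stand-in for the BitField helper it uses:

    #include <cstdint>

    // Minimal stand-in for v8's BitField template: a value of type T held
    // in `size` bits starting at bit `shift` of a packed word.
    template <class T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed >> shift) & ((1u << size) - 1));
      }
    };

    using ObjectBits  = BitField<int, 0, 3>;
    using ValueBits   = BitField<int, 3, 3>;
    using AddressBits = BitField<int, 6, 3>;

    // Pack three 3-bit register codes the way MinorKey() does above.
    uint32_t MinorKey(int object, int value, int address) {
      return ObjectBits::encode(object) |
             ValueBits::encode(value) |
             AddressBits::encode(address);
    }

The three 3-bit register codes fill bits 0..8, leaving bit 9 for the
remembered-set action and bit 10 for the FP-save mode, matching the
declarations above.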
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 3a657bd..e5ca02c 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -30,6 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -39,12 +40,16 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
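
BeforeCall/AfterCall now keep the assembler's has_frame flag in sync with the
physical frame, so generated code can assert that calls requiring a frame
only happen inside one. The same pairing expressed as an RAII guard, a sketch
in which MacroAssembler is a stand-in holding only the relevant state:

    #include <cassert>

    // Stand-in for the assembler state the helpers manipulate.
    struct MacroAssembler {
      bool has_frame = false;
      void EnterFrame() { /* emit: push ebp; mov ebp, esp; ... */ }
      void LeaveFrame() { /* emit: mov esp, ebp; pop ebp */ }
    };

    // Scope-bound version of the BeforeCall/AfterCall pairing:
    // construction mirrors BeforeCall, destruction mirrors AfterCall, so
    // the flag can never be left set by an early return.
    class FrameGuard {
     public:
      explicit FrameGuard(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterFrame();
        assert(!masm_->has_frame);
        masm_->has_frame = true;
      }
      ~FrameGuard() {
        masm_->LeaveFrame();
        assert(masm_->has_frame);
        masm_->has_frame = false;
      }
     private:
      MacroAssembler* masm_;
    };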
 
 
@@ -108,14 +113,14 @@
     __ mov(edx, dst);
     __ and_(edx, 0xF);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(16));
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(16));
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);
 
     // edi is now aligned. Check if esi is also aligned.
     Label unaligned_source;
-    __ test(Operand(src), Immediate(0x0F));
+    __ test(src, Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
       // Copy loop for aligned source and destination.
@@ -130,11 +135,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqa(xmm0, Operand(src, 0x00));
         __ movdqa(xmm1, Operand(src, 0x10));
-        __ add(Operand(src), Immediate(0x20));
+        __ add(src, Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(Operand(dst), Immediate(0x20));
+        __ add(dst, Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -142,12 +147,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqa(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -176,11 +181,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqu(xmm0, Operand(src, 0x00));
         __ movdqu(xmm1, Operand(src, 0x10));
-        __ add(Operand(src), Immediate(0x20));
+        __ add(src, Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(Operand(dst), Immediate(0x20));
+        __ add(dst, Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -188,12 +193,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqu(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -228,10 +233,10 @@
     __ mov(edx, dst);
     __ and_(edx, 0x03);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(4));  // edx = 4 - (dst & 3)
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(4));  // edx = 4 - (dst & 3)
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);
     // edi is now aligned, ecx holds the number of remaining bytes to copy.
 
     __ mov(edx, count);
@@ -261,6 +266,370 @@
 
 #undef __
 
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ebx    : target map
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  // Set transitioned map.
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
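
The transition itself is a single map-pointer store; the write barrier after
it is what makes the store visible to the store buffer and the incremental
marker. A toy barrier showing what the EMIT_REMEMBERED_SET and OMIT_SMI_CHECK
arguments control (every type and the InNewSpace predicate are simplified
stand-ins):

    #include <cstdint>
    #include <unordered_set>

    struct HeapObject {};  // stand-in

    // Toy remembered set: old-space slots that may point into new space.
    std::unordered_set<HeapObject**> remembered_set;

    // On ia32, smis have a clear low tag bit; heap pointers have it set.
    bool IsSmi(HeapObject* v) {
      return (reinterpret_cast<uintptr_t>(v) & 1) == 0;
    }

    bool InNewSpace(HeapObject* /*v*/) { return true; }  // stand-in

    // Mirrors the RecordWriteField contract: skip the smi check when the
    // caller knows the value is a heap object (OMIT_SMI_CHECK), and record
    // the slot when the remembered set must be updated.
    void WriteField(HeapObject** slot, HeapObject* value,
                    bool omit_smi_check) {
      *slot = value;
      if (!omit_smi_check && IsSmi(value)) return;  // no barrier for smis
      if (InNewSpace(value)) remembered_set.insert(slot);
    }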
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ebx    : target map
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required;
+  __ push(eax);
+  __ push(ebx);
+
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
+
+  // Allocate new FixedDoubleArray.
+  // edx: receiver
+  // edi: length of source FixedArray (smi-tagged)
+  __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
+  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
+
+  // eax: destination FixedDoubleArray
+  // edi: number of elements
+  // edx: receiver
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
+  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+  __ mov(ebx, eax);
+  __ RecordWriteField(edx,
+                      JSObject::kElementsOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
+
+  // Prepare for conversion loop.
+  ExternalReference canonical_the_hole_nan_reference =
+      ExternalReference::address_of_the_hole_nan();
+  XMMRegister the_hole_nan = xmm1;
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(the_hole_nan,
+              Operand::StaticVariable(canonical_the_hole_nan_reference));
+  }
+  __ jmp(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  // Restore registers before jumping into runtime.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(ebx);
+  __ pop(eax);
+  __ jmp(fail);
+
+  // Convert and copy elements
+  // esi: source FixedArray
+  __ bind(&loop);
+  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
+  // ebx: current element from source
+  // edi: index of current element
+  __ JumpIfNotSmi(ebx, &convert_hole);
+
+  // Normal smi, convert it to double and store.
+  __ SmiUntag(ebx);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope fscope(SSE2);
+    __ cvtsi2sd(xmm0, ebx);
+    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+              xmm0);
+  } else {
+    __ push(ebx);
+    __ fild_s(Operand(esp, 0));
+    __ pop(ebx);
+    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  __ jmp(&entry);
+
+  // Found hole, store hole_nan_as_double instead.
+  __ bind(&convert_hole);
+
+  if (FLAG_debug_code) {
+    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+    __ Assert(equal, "object found in smi-only array");
+  }
+
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+              the_hole_nan);
+  } else {
+    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
+    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
+  }
+
+  __ bind(&entry);
+  __ sub(edi, Immediate(Smi::FromInt(1)));
+  __ j(not_sign, &loop);
+
+  __ pop(ebx);
+  __ pop(eax);
+  // eax: value
+  // ebx: target map
+  // Set transitioned map.
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Restore esi.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
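
Per element, the loop above does one of two things: untag a smi and store it
as a double, or store the canonical hole NaN where the source held the hole.
The same decision in plain C++, with Element and kHoleNan as simplified
stand-ins for the tagged representation:

    #include <cstdint>
    #include <limits>
    #include <optional>
    #include <vector>

    // Stand-in element: a small integer, or nullopt for the hole.
    using Element = std::optional<int32_t>;

    // Placeholder for the canonical hole NaN the stub loads from
    // address_of_the_hole_nan(); the real heap uses one fixed bit pattern.
    const double kHoleNan = std::numeric_limits<double>::quiet_NaN();

    // Per-element logic of the conversion loop: untag-and-convert for
    // smis, canonical hole NaN for holes.
    std::vector<double> SmiOnlyToDouble(const std::vector<Element>& src) {
      std::vector<double> dst(src.size());
      for (size_t i = 0; i < src.size(); ++i) {
        dst[i] = src[i] ? static_cast<double>(*src[i]) : kHoleNan;
      }
      return dst;
    }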
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ebx    : target map
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required;
+  __ push(eax);
+  __ push(edx);
+  __ push(ebx);
+
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+
+  // Allocate new FixedArray.
+  // ebx: length of source FixedDoubleArray (smi-tagged)
+  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
+  __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+
+  // eax: destination FixedArray
+  // ebx: number of elements
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_array_map()));
+  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+
+  __ jmp(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ pop(ebx);
+  __ pop(edx);
+  __ pop(eax);
+  __ jmp(fail);
+
+  // Box doubles into heap numbers.
+  // edi: source FixedDoubleArray
+  // eax: destination FixedArray
+  __ bind(&loop);
+  // ebx: index of current element (smi-tagged)
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
+  __ j(equal, &convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
+  // edx: new heap number
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope fscope(SSE2);
+    __ movdbl(xmm0,
+              FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+    __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
+    __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
+    __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
+    __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
+  }
+  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
+  __ mov(esi, ebx);
+  __ RecordWriteArray(eax,
+                      edx,
+                      esi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&entry, Label::kNear);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
+         masm->isolate()->factory()->the_hole_value());
+
+  __ bind(&entry);
+  __ sub(ebx, Immediate(Smi::FromInt(1)));
+  __ j(not_sign, &loop);
+
+  __ pop(ebx);
+  __ pop(edx);
+  // ebx: target map
+  // edx: receiver
+  // Set transitioned map.
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
+  __ RecordWriteField(edx,
+                      JSObject::kElementsOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Restore registers.
+  __ pop(eax);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
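
Holes are recognized by comparing only the upper 32 bits of the stored double
against kHoleNanUpper32, which is what the cmp against FieldOperand(edi, ebx,
times_4, offset) does above. A sketch of that test; the bit pattern below is
an assumed placeholder, not necessarily the real constant:

    #include <cstdint>
    #include <cstring>

    // Assumed placeholder for kHoleNanUpper32: a reserved quiet-NaN
    // pattern; the exact value lives in v8's headers.
    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;

    // Only the high word is compared, so any double whose upper 32 bits
    // match the marker counts as the hole.
    bool IsHole(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }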
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Factory* factory,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ test(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ test(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ SmiUntag(result);
+  __ add(index, result);
+  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+         Immediate(factory->empty_string()));
+  __ j(not_equal, call_runtime);
+  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label seq_string;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test(result, Immediate(kStringRepresentationMask));
+  __ j(zero, &seq_string, Label::kNear);
+
+  // Handle external strings.
+  Label ascii_external, done;
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ test(result, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ test_b(result, kShortExternalStringMask);
+  __ j(not_zero, call_runtime);
+  // Check encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ test_b(result, kStringEncodingMask);
+  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+  __ j(not_equal, &ascii_external, Label::kNear);
+  // Two-byte string.
+  __ movzx_w(result, Operand(result, index, times_2, 0));
+  __ jmp(&done, Label::kNear);
+  __ bind(&ascii_external);
+  // Ascii string.
+  __ movzx_b(result, Operand(result, index, times_1, 0));
+  __ jmp(&done, Label::kNear);
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii;
+  __ bind(&seq_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ test(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  __ movzx_w(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // Ascii string.
+  // Load the byte into the result register.
+  __ bind(&ascii);
+  __ movzx_b(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
+}
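
The routine dispatches on representation: slices fold their offset into the
index and redirect to the parent, flat cons strings (empty second half) deref
to the first child, and what remains is sequential or external, one- or
two-byte. The same dispatch over a toy string model; external strings and the
short-external bail-out are omitted, and all types are stand-ins:

    #include <cstdint>
    #include <optional>
    #include <string>

    // Toy model of the string shapes the generated code handles inline.
    struct Str {
      enum Kind { kSeqOneByte, kSeqTwoByte, kCons, kSliced } kind;
      std::string one_byte;         // kSeqOneByte payload
      std::u16string two_byte;      // kSeqTwoByte payload
      const Str* first = nullptr;   // kCons: left child
      const Str* second = nullptr;  // kCons: right child (empty if flat)
      const Str* parent = nullptr;  // kSliced: underlying string
      size_t offset = 0;            // kSliced: start within parent
    };

    bool IsEmpty(const Str* s) {
      return s->kind == Str::kSeqOneByte && s->one_byte.empty();
    }

    // Character load with the stub's dispatch: nullopt marks the cases
    // where the generated code goes to the runtime (unflattened cons).
    std::optional<uint16_t> CharAt(const Str* s, size_t index) {
      if (s->kind == Str::kSliced) {
        index += s->offset;                          // adjust into parent
        s = s->parent;
      } else if (s->kind == Str::kCons) {
        if (!IsEmpty(s->second)) return std::nullopt;  // needs flattening
        s = s->first;
      }
      if (s->kind == Str::kSeqTwoByte) return s->two_byte[index];
      return static_cast<uint8_t>(s->one_byte[index]);
    }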
+
+#undef __
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index c85fa83..f4ab0b5 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -72,6 +72,22 @@
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Factory* factory,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2389948..2649560 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -100,63 +100,64 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if ((object_regs & (1 << r)) != 0) {
-      __ push(reg);
-    }
-    if ((non_object_regs & (1 << r)) != 0) {
-      if (FLAG_debug_code) {
-        __ test(reg, Immediate(0xc0000000));
-        __ Assert(zero, "Unable to encode value as smi");
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object
+    // values are stored as smis, causing them to be untouched by GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
       }
-      __ SmiTag(reg);
-      __ push(reg);
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ test(reg, Immediate(0xc0000000));
+          __ Assert(zero, "Unable to encode value as smi");
+        }
+        __ SmiTag(reg);
+        __ push(reg);
+      }
     }
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ Set(eax, Immediate(0));  // No arguments.
-  __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+    __ Set(eax, Immediate(0));  // No arguments.
+    __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values containing object pointers from the expression
-  // stack.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if (FLAG_debug_code) {
-      __ Set(reg, Immediate(kDebugZapValue));
+    // Restore the register values containing object pointers from the
+    // expression stack.
+    for (int i = kNumJSCallerSaved; --i >= 0;) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if (FLAG_debug_code) {
+        __ Set(reg, Immediate(kDebugZapValue));
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+        __ SmiUntag(reg);
+      }
     }
-    if ((object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-    }
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-      __ SmiUntag(reg);
-    }
+
+    // Get rid of the internal frame.
   }
 
-  // Get rid of the internal frame.
-  __ LeaveInternalFrame();
-
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
   }
 
   // Now that the break point has been handled, resume normal execution by
@@ -243,12 +244,12 @@
 }
 
 
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
   // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- edi: function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0, false);
+  Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
 }
 
 
@@ -298,7 +299,7 @@
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }
 
 const bool Debug::kFrameDropperSupported = true;
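
The debug-break helper saves live registers on the expression stack;
non-object values are smi-tagged first so the GC reads them as immediates
rather than pointers. The ia32 tagging round-trip, with the same 31-bit range
check the debug code performs:

    #include <cassert>
    #include <cstdint>

    // ia32 smi encoding: value shifted left by one, low tag bit clear.
    // Only values with the top two bits clear fit, which is what the
    // `test reg, 0xc0000000` assertion above verifies before tagging.
    uint32_t SmiTag(uint32_t value) {
      assert((value & 0xc0000000u) == 0 && "unable to encode value as smi");
      return value << 1;
    }

    uint32_t SmiUntag(uint32_t smi) { return smi >> 1; }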
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 080ad64..eeee4f2 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -99,7 +99,7 @@
         new_reloc->GetDataStartAddress() + padding, 0);
     intptr_t comment_string
         = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
-    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
+    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
     for (int i = 0; i < additional_comments; ++i) {
 #ifdef DEBUG
       byte* pos_before = reloc_info_writer.pos();
@@ -156,7 +156,8 @@
     // We use RUNTIME_ENTRY for deoptimization bailouts.
     RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                     RelocInfo::RUNTIME_ENTRY,
-                    reinterpret_cast<intptr_t>(deopt_entry));
+                    reinterpret_cast<intptr_t>(deopt_entry),
+                    NULL);
     reloc_info_writer.Write(&rinfo);
     ASSERT_GE(reloc_info_writer.pos(),
               reloc_info->address() + ByteArray::kHeaderSize);
@@ -188,6 +189,11 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -199,7 +205,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -228,10 +235,14 @@
   *(call_target_address - 2) = 0x90;  // nop
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -246,6 +257,9 @@
   *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, check_code);
 }
 
 
@@ -393,7 +407,14 @@
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
     // Setup the frame pointer and the context pointer.
-    output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+    // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+    int frame_pointer = input_->GetRegister(ebp.code());
+    if ((frame_pointer & 0x4) == 0) {
+      // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
+      frame_pointer -= kPointerSize;
+      has_alignment_padding_ = 1;
+    }
+    output_[0]->SetRegister(ebp.code(), frame_pointer);
     output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
 
     unsigned pc_offset = data->OsrPcOffset()->value();
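
The OSR entry realigns the frame: the return address lives at FP + 4, so FP
mod 8 must be 4 for that slot to sit on an 8-byte boundary; otherwise one
padding word is inserted and FP shifts down. The decision in isolation:

    #include <cstdint>

    const int kPointerSize = 4;

    // Returns the adjusted frame pointer and reports whether a padding
    // word was added, mirroring the has_alignment_padding_ logic above.
    uint32_t AlignOsrFramePointer(uint32_t fp, bool* has_padding) {
      if ((fp & 0x4) == 0) {
        fp -= kPointerSize;   // return address at fp + 4 now 8-aligned
        *has_padding = true;
      } else {
        *has_padding = false;
      }
      return fp;
    }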
@@ -458,9 +479,11 @@
   // top address and the current frame's size.
   uint32_t top_address;
   if (is_bottommost) {
-    // 2 = context and function in the frame.
-    top_address =
-        input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+    // If the optimized frame had alignment padding, adjust the frame pointer
+    // to point to the new position of the old frame pointer after padding
+    // is removed. Subtract 2 * kPointerSize for the context and function slots.
+    top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+        height_in_bytes + has_alignment_padding_ * kPointerSize;
   } else {
     top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
   }
@@ -511,7 +534,9 @@
   }
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+  ASSERT(!is_bottommost ||
+      input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
+      == fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
   if (FLAG_trace_deopt) {
@@ -616,7 +641,7 @@
 
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
-  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  __ sub(esp, Immediate(kDoubleRegsSize));
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
@@ -640,7 +665,7 @@
     __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
     __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
   }
-  __ sub(edx, Operand(ebp));
+  __ sub(edx, ebp);
   __ neg(edx);
 
   // Allocate a new deoptimizer object.
@@ -653,7 +678,10 @@
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
   __ mov(Operand(esp, 5 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
 
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
@@ -676,15 +704,15 @@
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
   } else {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
   }
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
-  __ add(ecx, Operand(esp));
+  __ add(ecx, esp);
 
   // Unwind the stack down to - but not including - the unwinding
   // limit and copy the contents of the activation frame to the input
@@ -693,18 +721,43 @@
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
-  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
-  __ cmp(ecx, Operand(esp));
+  __ add(edx, Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);
 
+  // If frame was dynamically aligned, pop padding.
+  Label sentinel, sentinel_done;
+  __ pop(ecx);
+  __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+  __ j(equal, &sentinel);
+  __ push(ecx);
+  __ jmp(&sentinel_done);
+  __ bind(&sentinel);
+  __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+         Immediate(1));
+  __ bind(&sentinel_done);
   // Compute the output frame in the deoptimizer.
   __ push(eax);
   __ PrepareCallCFunction(1, ebx);
   __ mov(Operand(esp, 0 * kPointerSize), eax);
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
   __ pop(eax);
 
+  if (type() == OSR) {
+    // If alignment padding is added, push the sentinel.
+    Label no_osr_padding;
+    __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+           Immediate(0));
+    __ j(equal, &no_osr_padding, Label::kNear);
+    __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+    __ bind(&no_osr_padding);
+  }
+
   // Replace the current frame with the output frames.
   Label outer_push_loop, inner_push_loop;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
@@ -717,12 +770,12 @@
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
-  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
-  __ add(Operand(eax), Immediate(kPointerSize));
-  __ cmp(eax, Operand(edx));
+  __ add(eax, Immediate(kPointerSize));
+  __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index a936277..da22390 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -55,6 +55,7 @@
 
 
 static const ByteMnemonic two_operands_instr[] = {
+  {0x01, "add", OPER_REG_OP_ORDER},
   {0x03, "add", REG_OPER_OP_ORDER},
   {0x09, "or", OPER_REG_OP_ORDER},
   {0x0B, "or", REG_OPER_OP_ORDER},
@@ -117,6 +118,19 @@
 };
 
 
+// Generally we don't want to generate these because they are subject to partial
+// register stalls.  They are included for completeness and because the cmp
+// variant is used by the RecordWrite stub.  Because cmp does not update the
+// register, it is not subject to partial register stalls.
+static const ByteMnemonic byte_immediate_instr[] = {
+  {0x0c, "or", UNSET_OP_ORDER},
+  {0x24, "and", UNSET_OP_ORDER},
+  {0x34, "xor", UNSET_OP_ORDER},
+  {0x3c, "cmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
 static const char* const jump_conditional_mnem[] = {
   /*0*/ "jo", "jno", "jc", "jnc",
   /*4*/ "jz", "jnz", "jna", "ja",
@@ -149,7 +163,8 @@
   REGISTER_INSTR,
   MOVE_REG_INSTR,
   CALL_JUMP_INSTR,
-  SHORT_IMMEDIATE_INSTR
+  SHORT_IMMEDIATE_INSTR,
+  BYTE_IMMEDIATE_INSTR
 };
 
 
@@ -164,6 +179,10 @@
  public:
   InstructionTable();
   const InstructionDesc& Get(byte x) const { return instructions_[x]; }
+  static InstructionTable* get_instance() {
+    static InstructionTable table;
+    return &table;
+  }
 
  private:
   InstructionDesc instructions_[256];
@@ -198,6 +217,7 @@
   CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
   CopyTable(call_jump_instr, CALL_JUMP_INSTR);
   CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+  CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
   AddJumpConditionalShort();
   SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
   SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
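
get_instance() above replaces the file-scope
`static InstructionTable instruction_table` (removed further down) with a
function-local static, the classic lazy-initialization pattern. In isolation,
with Table as a stand-in:

    // Lazily constructed singleton: the table is built on first use
    // rather than during static initialization, so translation-unit
    // initialization order stops mattering.
    class Table {
     public:
      static Table* get_instance() {
        static Table table;  // constructed on the first call only
        return &table;
      }
     private:
      Table() { /* populate the 256-entry instruction table */ }
    };

Before C++11 this pattern is not thread-safe on first use, which is tolerable
for a debugging disassembler.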
@@ -243,15 +263,13 @@
 }
 
 
-static InstructionTable instruction_table;
-
-
 // The IA32 disassembler implementation.
 class DisassemblerIA32 {
  public:
   DisassemblerIA32(const NameConverter& converter,
                    bool abort_on_unimplemented = true)
       : converter_(converter),
+        instruction_table_(InstructionTable::get_instance()),
         tmp_buffer_pos_(0),
         abort_on_unimplemented_(abort_on_unimplemented) {
     tmp_buffer_[0] = '\0';
@@ -265,11 +283,11 @@
 
  private:
   const NameConverter& converter_;
+  InstructionTable* instruction_table_;
   v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
   unsigned int tmp_buffer_pos_;
   bool abort_on_unimplemented_;
 
-
   enum {
     eax = 0,
     ecx = 1,
@@ -868,7 +886,7 @@
   }
   bool processed = true;  // Will be set to false if the current instruction
                           // is not in 'instructions' table.
-  const InstructionDesc& idesc = instruction_table.Get(*data);
+  const InstructionDesc& idesc = instruction_table_->Get(*data);
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
       AppendToBuffer(idesc.mnem);
@@ -912,6 +930,12 @@
       break;
     }
 
+    case BYTE_IMMEDIATE_INSTR: {
+      AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+      data += 2;
+      break;
+    }
+
     case NO_INSTR:
       processed = false;
       break;
@@ -1346,11 +1370,6 @@
         data += 2;
         break;
 
-      case 0x2C:
-        AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
-        data += 2;
-        break;
-
       case 0xA9:
         AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
         data += 5;
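
The BYTE_IMMEDIATE_INSTR case prints one-byte-opcode, one-byte-immediate
forms such as 3C ib as `cmp al, imm8`. A minimal decoder over just the new
table (function and type names are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    struct ByteMnemonic { int opcode; const char* mnem; };

    // The table added above: al-with-imm8 forms, terminated by -1.
    static const ByteMnemonic kByteImmediate[] = {
        {0x0c, "or"}, {0x24, "and"}, {0x34, "xor"}, {0x3c, "cmp"},
        {-1, ""}};

    // Returns bytes consumed (2) on a hit, 0 otherwise; mirrors the
    // BYTE_IMMEDIATE_INSTR case in InstructionDecode.
    int DecodeByteImmediate(const uint8_t* data, std::string* out) {
      for (const ByteMnemonic* e = kByteImmediate; e->opcode != -1; ++e) {
        if (e->opcode == data[0]) {
          char buf[32];
          std::snprintf(buf, sizeof(buf), "%s al, 0x%x", e->mnem, data[1]);
          *out = buf;
          return 2;
        }
      }
      return 0;
    }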
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 2f1b2a9..45b847a 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -34,37 +34,37 @@
 
 // Register lists
 // Note that the bit values must match those used in actual instruction encoding
-static const int kNumRegs = 8;
+const int kNumRegs = 8;
 
 
 // Caller-saved registers
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
   1 << 0 |  // eax
   1 << 1 |  // ecx
   1 << 2 |  // edx
   1 << 3 |  // ebx - used as a caller-saved register in JavaScript code
   1 << 7;   // edi - callee function
 
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 
 // Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 8;
+const int kNumSafepointRegisters = 8;
 
 // ----------------------------------------------------
 
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kContextOffset = 1 * kPointerSize;
-  static const int kFPOffset      = 2 * kPointerSize;
-  static const int kStateOffset   = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
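
The handler layout gains a code slot and drops the saved PC; expressed as a
plain struct with one word per slot (a sketch of the offsets declared above):

    #include <cstdint>

    // Mirror of the new StackHandlerConstants layout: five words.
    struct StackHandler {
      uint32_t next;     // kNextOffset    = 0 * kPointerSize
      uint32_t code;     // kCodeOffset    = 1 * kPointerSize
      uint32_t state;    // kStateOffset   = 2 * kPointerSize
      uint32_t context;  // kContextOffset = 3 * kPointerSize
      uint32_t fp;       // kFPOffset      = 4 * kPointerSize
    };
    static_assert(sizeof(StackHandler) == 5 * 4,
                  "kSize = kFPOffset + kPointerSize");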
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index ca6ce6e..ef4f0c5 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -44,11 +44,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -122,6 +117,8 @@
   ASSERT(info_ == NULL);
   info_ = info;
   scope_ = info->scope();
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -136,17 +133,26 @@
   // with undefined when called as functions (without an explicit
   // receiver object). ecx is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
-    __ test(ecx, Operand(ecx));
+    __ test(ecx, ecx);
     __ j(zero, &ok, Label::kNear);
     // +1 for return address.
     int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(ecx, Operand(esp, receiver_offset));
+    __ JumpIfSmi(ecx, &ok);
+    __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
+    __ j(not_equal, &ok, Label::kNear);
     __ mov(Operand(esp, receiver_offset),
            Immediate(isolate()->factory()->undefined_value()));
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -164,11 +170,6 @@
     }
   }
 
-  set_stack_height(2 + scope()->num_stack_slots());
-  if (FLAG_verify_stack_height) {
-    verify_stack_height();
-  }
-
   bool function_in_register = true;
 
   // Possibly allocate a local context.
@@ -200,11 +201,12 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering esi.
-        __ mov(ecx, esi);
-        __ RecordWrite(ecx, context_offset, eax, ebx);
+        // Update the write barrier. This clobbers eax and ebx.
+        __ RecordWriteContextSlot(esi,
+                                  context_offset,
+                                  eax,
+                                  ebx,
+                                  kDontSaveFPRegs);
       }
     }
   }
@@ -230,7 +232,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (is_strict_mode()) {
+    if (!is_classic_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -260,7 +262,10 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -363,15 +368,6 @@
 }
 
 
-void FullCodeGenerator::verify_stack_height() {
-  ASSERT(FLAG_verify_stack_height);
-  __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
-  __ cmp(ebp, Operand(esp));
-  __ Assert(equal, "Full codegen stack height not as expected.");
-  __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
@@ -388,14 +384,13 @@
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
   __ push(operand);
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -442,12 +437,11 @@
   } else {
     __ push(Immediate(lit));
   }
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -480,7 +474,6 @@
                                                    Register reg) const {
   ASSERT(count > 0);
   __ Drop(count);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -490,7 +483,6 @@
   ASSERT(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -499,7 +491,6 @@
   ASSERT(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
-  codegen()->decrement_stack_height(count - 1);
 }
 
 
@@ -509,9 +500,8 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
-  codegen()->decrement_stack_height(count);
 }
 
 
@@ -545,7 +535,6 @@
   __ bind(materialize_false);
   __ push(Immediate(isolate()->factory()->false_value()));
   __ bind(&done);
-  codegen()->increment_stack_height();
 }
 
 
@@ -573,12 +562,11 @@
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
   __ push(Immediate(value));
-  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -597,7 +585,7 @@
   ToBooleanStub stub(result_register());
   __ push(result_register());
   __ CallStub(&stub, condition->test_id());
-  __ test(result_register(), Operand(result_register()));
+  __ test(result_register(), result_register());
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
@@ -661,16 +649,17 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
     ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWrite(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -681,13 +670,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ cmp(eax, isolate()->factory()->true_value());
     Split(equal, if_true, if_false, NULL);
@@ -697,13 +680,15 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
+                                        VariableMode mode,
                                         FunctionLiteral* function,
                                         int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -715,7 +700,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(StackOperand(variable), result_register());
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ mov(StackOperand(variable),
                Immediate(isolate()->factory()->the_hole_value()));
@@ -738,11 +723,16 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(ContextOperand(esi, variable->index()), result_register());
-        int offset = Context::SlotOffset(variable->index());
-        __ mov(ebx, esi);
-        __ RecordWrite(ebx, offset, result_register(), ecx);
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(esi,
+                                  Context::SlotOffset(variable->index()),
+                                  result_register(),
+                                  ecx,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
@@ -755,28 +745,26 @@
       Comment cmnt(masm_, "[ Declaration");
       __ push(esi);
       __ push(Immediate(variable->name()));
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+          ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      increment_stack_height(3);
       if (function != NULL) {
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         __ push(Immediate(isolate()->factory()->the_hole_value()));
-        increment_stack_height();
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
-        increment_stack_height();
       }
       __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-      decrement_stack_height(4);
       break;
     }
   }
@@ -801,7 +789,6 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
 
-  int switch_clause_stack_height = stack_height();
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -835,10 +822,10 @@
     if (inline_smi_code) {
       Label slow_case;
       __ mov(ecx, edx);
-      __ or_(ecx, Operand(eax));
+      __ or_(ecx, eax);
       patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
 
-      __ cmp(edx, Operand(eax));
+      __ cmp(edx, eax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
       __ jmp(clause->body_target());
@@ -850,7 +837,7 @@
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
     __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
     __ jmp(clause->body_target());
@@ -866,7 +853,6 @@
     __ jmp(default_clause->body_target());
   }
 
-  set_stack_height(switch_clause_stack_height);
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
@@ -908,13 +894,18 @@
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(eax);
-  increment_stack_height();
+
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+  __ j(below_equal, &call_runtime);
 
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
+  Label next;
   __ mov(ecx, eax);
   __ bind(&next);
 
@@ -939,7 +930,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   Label check_prototype;
-  __ cmp(ecx, Operand(eax));
+  __ cmp(ecx, eax);
   __ j(equal, &check_prototype, Label::kNear);
   __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   __ cmp(edx, isolate()->factory()->empty_fixed_array());
@@ -985,15 +976,21 @@
   __ jmp(&loop);
 
   // We got a fixed array in register eax. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ push(Immediate(Smi::FromInt(0)));  // Map (0) - force slow check.
-  __ push(eax);
+  __ mov(ebx, Immediate(Smi::FromInt(1)));  // Smi indicates slow check
+  __ mov(ecx, Operand(esp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
+  __ j(above, &non_proxy);
+  __ mov(ebx, Immediate(Smi::FromInt(0)));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ push(ebx);  // Smi
+  __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
-  // 1 ~ The object has already been pushed.
-  increment_stack_height(ForIn::kElementCount - 1);
   // Generate code for doing the condition check.
   __ bind(&loop);
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
@@ -1004,26 +1001,32 @@
   __ mov(ebx, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register edx.
   __ mov(edx, Operand(esp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ mov(ecx, Operand(esp, 4 * kPointerSize));
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  ASSERT(Smi::FromInt(0) == 0);
+  __ test(edx, edx);
+  __ j(zero, &update_each);
+
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
   __ push(ecx);  // Enumerable.
   __ push(ebx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(equal, loop_statement.continue_label());
-  __ mov(ebx, Operand(eax));
+  __ mov(ebx, eax);
 
   // Update the 'each' property or variable from the possibly filtered
   // entry in register ebx.
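The bare `test edx, edx` above relies on V8's ia32 Smi encoding: a 31-bit payload shifted left by one with tag bit 0, so Smi::FromInt(0) is the all-zero word, exactly what the ASSERT pins down. A quick standalone check of that assumption:

    #include <cassert>
    #include <cstdint>

    // Mirrors Smi::FromInt on ia32: value << 1, low tag bit 0.
    static intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << 1;
    }

    int main() {
      assert(SmiFromInt(0) == 0);  // proxy sentinel: a plain zero test suffices
      assert(SmiFromInt(1) == 2);  // the "slow check" sentinel stays nonzero
      return 0;
    }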
@@ -1047,9 +1050,8 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ add(Operand(esp), Immediate(5 * kPointerSize));
+  __ add(esp, Immediate(5 * kPointerSize));
 
-  decrement_stack_height(ForIn::kElementCount);
   // Exit and decrement the loop depth.
   __ bind(&exit);
   decrement_loop_depth();
@@ -1069,7 +1071,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ push(Immediate(info));
     __ CallStub(&stub);
   } else {
@@ -1099,7 +1101,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
@@ -1113,7 +1115,7 @@
     // If no outer scope calls eval, we do not need to check more
     // context extensions.  If we have reached an eval scope, we check
     // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
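The rename from calls_eval() to calls_non_strict_eval() narrows the extension check: strict-mode eval runs in its own scope and cannot attach bindings to the calling context, so only classic-mode eval forces the NULL-extension test. A one-function sketch of the narrowed predicate (signature invented for exposition):

    #include <cassert>

    // Only a non-strict eval in the scope can install a context extension.
    static bool CallsNonStrictEval(bool calls_eval, bool scope_is_strict) {
      return calls_eval && !scope_is_strict;
    }

    int main() {
      assert(CallsNonStrictEval(true, false));    // classic eval: must check
      assert(!CallsNonStrictEval(true, true));    // strict eval: check skipped
      assert(!CallsNonStrictEval(false, false));  // no eval at all
      return 0;
    }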
 
@@ -1158,7 +1160,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
@@ -1189,16 +1191,23 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ cmp(eax, isolate()->factory()->the_hole_value());
       __ j(not_equal, done);
-      __ mov(eax, isolate()->factory()->undefined_value());
+      if (local->mode() == CONST) {
+        __ mov(eax, isolate()->factory()->undefined_value());
+      } else {  // LET || CONST_HARMONY
+        __ push(Immediate(var->name()));
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ jmp(done);
   }
@@ -1231,23 +1240,63 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        Label done;
-        GetVar(eax, var);
-        __ cmp(eax, isolate()->factory()->the_hole_value());
-        __ j(not_equal, &done, Label::kNear);
-        if (var->mode() == Variable::LET) {
-          __ push(Immediate(var->name()));
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-        } else {  // Variable::CONST
-          __ mov(eax, isolate()->factory()->undefined_value());
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
+        } else {
+          // Check that we always have a valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        __ bind(&done);
-        context()->Plug(eax);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          Label done;
+          GetVar(eax, var);
+          __ cmp(eax, isolate()->factory()->the_hole_value());
+          __ j(not_equal, &done, Label::kNear);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ push(Immediate(var->name()));
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ mov(eax, isolate()->factory()->undefined_value());
+          }
+          __ bind(&done);
+          context()->Plug(eax);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
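A condensed restatement of the elision rule spelled out in the comments above, as a standalone sketch; positions are plain ints and the types are simplified for exposition:

    #include <cassert>

    enum VariableMode { VAR, CONST, CONST_HARMONY, LET };
    const int kNoPosition = -1;  // stands in for RelocInfo::kNoPosition

    static bool SkipInitCheck(bool same_declaration_scope, VariableMode mode,
                              int initializer_position, int proxy_position) {
      if (!same_declaration_scope) return false;
      assert(initializer_position != kNoPosition);
      assert(proxy_position != kNoPosition);
      // Legacy CONST may be declared yet never initialized, so never skip it;
      // LET/CONST_HARMONY reads textually after the initializer need no check.
      return mode != CONST && initializer_position < proxy_position;
    }

    int main() {
      assert(SkipInitCheck(true, LET, 10, 20));     // read after initializer
      assert(!SkipInitCheck(true, LET, 20, 10));    // read before initializer
      assert(!SkipInitCheck(true, CONST, 10, 20));  // legacy const never skips
      assert(!SkipInitCheck(false, LET, 10, 20));   // different decl scopes
      return 0;
    }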
 
@@ -1325,10 +1374,11 @@
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(expr->constant_properties()));
+  __ push(Immediate(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1336,10 +1386,15 @@
       ? ObjectLiteral::kHasFunction
       : ObjectLiteral::kNoFlags;
   __ push(Immediate(Smi::FromInt(flags)));
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1360,7 +1415,6 @@
     if (!result_saved) {
       __ push(eax);  // Save result on the stack
       result_saved = true;
-      increment_stack_height();
     }
     switch (property->kind()) {
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1372,9 +1426,9 @@
             VisitForAccumulatorValue(value);
             __ mov(ecx, Immediate(key->handle()));
             __ mov(edx, Operand(esp, 0));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
             __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
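The flipped conditionals here and in the later store/keyed-store sites are not behavior changes: the merge replaces the two-valued StrictModeFlag with a three-valued language mode, making classic mode the single non-strict case. A compile-only sketch under that assumption (enum names follow the 3.7 sources):

    enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };

    // Both strict variants take the strict store IC; only classic-mode code
    // keeps the non-strict stub, hence `is_classic_mode() ? plain : strict`.
    bool UseStrictStoreIC(LanguageMode mode) { return mode != CLASSIC_MODE; }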
@@ -1385,7 +1439,6 @@
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        increment_stack_height();
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1394,20 +1447,16 @@
         } else {
           __ Drop(3);
         }
-        decrement_stack_height(3);
         break;
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        increment_stack_height();
         VisitForStackValue(key);
         __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
                           Smi::FromInt(1) :
                           Smi::FromInt(0)));
-        increment_stack_height();
         VisitForStackValue(value);
         __ CallRuntime(Runtime::kDefineAccessor, 4);
-        decrement_stack_height(4);
         break;
       default: UNREACHABLE();
     }
@@ -1432,25 +1481,42 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(expr->constant_elements()));
-  if (expr->constant_elements()->map() ==
-      isolate()->heap()->fixed_cow_array_map()) {
-    ASSERT(expr->depth() == 1);
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    __ CallStub(&stub);
+  __ push(Immediate(constant_elements));
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        length);
+    __ CallStub(&stub);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
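The array-literal path now keys off the ElementsKind recorded in the literal's constant-elements pair. A compact sketch of the dispatch above, folding in the depth and length guards (enum values mirror the diff; the function shape is illustrative):

    enum CloneAction {
      CALL_CREATE_LITERAL,          // Runtime::kCreateArrayLiteral
      CALL_CREATE_LITERAL_SHALLOW,  // Runtime::kCreateArrayLiteralShallow
      STUB_COPY_ON_WRITE,           // FastCloneShallowArrayStub, COW mode
      STUB_CLONE_ELEMENTS,          // specialized: the kind cannot change
      STUB_CLONE_ANY_ELEMENTS       // must tolerate an ElementsKind change
    };

    CloneAction ChooseClone(bool fast_elements, bool cow_backing,
                            int depth, int length, int max_cloned) {
      if (fast_elements && cow_backing) return STUB_COPY_ON_WRITE;
      if (depth > 1) return CALL_CREATE_LITERAL;
      if (length > max_cloned) return CALL_CREATE_LITERAL_SHALLOW;
      return fast_elements ? STUB_CLONE_ELEMENTS : STUB_CLONE_ANY_ELEMENTS;
    }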
 
@@ -1470,18 +1536,31 @@
     if (!result_saved) {
       __ push(eax);
       result_saved = true;
-      increment_stack_height();
     }
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
-    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ mov(FieldOperand(ebx, offset), result_register());
-
-    // Update the write barrier for the array store.
-    __ RecordWrite(ebx, offset, result_register(), ecx);
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      // Fast case: array literals with ElementsKind FAST_ELEMENTS cannot
+      // transition, so there is no need to call the runtime stub.
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+      __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+      // Store the subexpression value in the array's elements.
+      __ mov(FieldOperand(ebx, offset), result_register());
+      // Update the write barrier for the array store.
+      __ RecordWriteField(ebx, offset, result_register(), ecx,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          INLINE_SMI_CHECK);
+    } else {
+      // Store the subexpression value in the array's elements.
+      __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+      __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+      __ mov(ecx, Immediate(Smi::FromInt(i)));
+      __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1499,9 +1578,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // on the left-hand side.
   if (!expr->target()->IsValidLeftHandSide()) {
-    ASSERT(expr->target()->AsThrow() != NULL);
-    VisitInCurrentContext(expr->target());  // Throw does not plug the context
-    context()->Plug(eax);
+    VisitForEffect(expr->target());
     return;
   }
 
@@ -1526,7 +1603,6 @@
         // We need the receiver both on the stack and in the accumulator.
         VisitForAccumulatorValue(property->obj());
         __ push(result_register());
-        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1537,7 +1613,6 @@
         VisitForAccumulatorValue(property->key());
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
-        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1569,7 +1644,6 @@
 
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
-    increment_stack_height();
     VisitForAccumulatorValue(expr->value());
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1619,14 +1693,14 @@
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1639,9 +1713,8 @@
   // stack. Right operand is in eax.
   Label smi_case, done, stub_call;
   __ pop(edx);
-  decrement_stack_height();
   __ mov(ecx, eax);
-  __ or_(eax, Operand(edx));
+  __ or_(eax, edx);
   JumpPatchSite patch_site(masm_);
   patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
 
@@ -1691,32 +1764,32 @@
       break;
     }
     case Token::ADD:
-      __ add(eax, Operand(ecx));
+      __ add(eax, ecx);
       __ j(overflow, &stub_call);
       break;
     case Token::SUB:
-      __ sub(eax, Operand(ecx));
+      __ sub(eax, ecx);
       __ j(overflow, &stub_call);
       break;
     case Token::MUL: {
       __ SmiUntag(eax);
-      __ imul(eax, Operand(ecx));
+      __ imul(eax, ecx);
       __ j(overflow, &stub_call);
-      __ test(eax, Operand(eax));
+      __ test(eax, eax);
       __ j(not_zero, &done, Label::kNear);
       __ mov(ebx, edx);
-      __ or_(ebx, Operand(ecx));
+      __ or_(ebx, ecx);
       __ j(negative, &stub_call);
       break;
     }
     case Token::BIT_OR:
-      __ or_(eax, Operand(ecx));
+      __ or_(eax, ecx);
       break;
     case Token::BIT_AND:
-      __ and_(eax, Operand(ecx));
+      __ and_(eax, ecx);
       break;
     case Token::BIT_XOR:
-      __ xor_(eax, Operand(ecx));
+      __ xor_(eax, ecx);
       break;
     default:
       UNREACHABLE();
@@ -1731,7 +1804,6 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  decrement_stack_height();
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
@@ -1744,9 +1816,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
-    ASSERT(expr->AsThrow() != NULL);
-    VisitInCurrentContext(expr);  // Throw does not plug the context
-    context()->Plug(eax);
+    VisitForEffect(expr);
     return;
   }
 
@@ -1770,31 +1840,26 @@
     }
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      increment_stack_height();
       VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
-      decrement_stack_height();
       __ mov(ecx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ call(ic);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      increment_stack_height();
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
       __ pop(edx);
-      decrement_stack_height();
       __ pop(eax);  // Restore value.
-      decrement_stack_height();
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ call(ic);
       break;
     }
@@ -1810,9 +1875,9 @@
     // Global var, const, or let.
     __ mov(ecx, var->name());
     __ mov(edx, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
@@ -1838,13 +1903,13 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(eax);  // Value.
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ push(Immediate(Smi::FromInt(language_mode())));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
       ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1859,12 +1924,14 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, ecx);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1877,14 +1944,15 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(eax);  // Value.
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ push(Immediate(Smi::FromInt(language_mode())));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
@@ -1915,11 +1983,10 @@
     __ mov(edx, Operand(esp, 0));
   } else {
     __ pop(edx);
-    decrement_stack_height();
   }
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->StoreIC_Initialize()
+      : isolate()->builtins()->StoreIC_Initialize_Strict();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1929,7 +1996,6 @@
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
     __ Drop(1);
-    decrement_stack_height();
   }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -1951,18 +2017,16 @@
   }
 
   __ pop(ecx);
-  decrement_stack_height();
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
     __ pop(edx);
-    decrement_stack_height();
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1972,7 +2036,6 @@
     __ push(edx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
-    decrement_stack_height();
   }
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -1992,7 +2055,6 @@
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
-    decrement_stack_height();
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
@@ -2019,7 +2081,6 @@
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
@@ -2034,7 +2095,6 @@
   __ pop(ecx);
   __ push(eax);
   __ push(ecx);
-  increment_stack_height();
 
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2053,7 +2113,6 @@
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
 }
 
@@ -2069,19 +2128,38 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  bool record_call_target = !Serializer::enabled();
+  if (record_call_target) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+  }
   CallFunctionStub stub(arg_count, flags);
-  __ CallStub(&stub);
+  __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+  __ CallStub(&stub, expr->id());
+  if (record_call_target) {
+    // There is a one-element cache in the instruction stream.
+#ifdef DEBUG
+    int return_site_offset = masm()->pc_offset();
+#endif
+    Handle<Object> uninitialized =
+        CallFunctionStub::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    __ test(eax, Immediate(cell));
+    // Patching code in the stub assumes the opcode is 1 byte and there is
+    // a word for a pointer in the operand.
+    ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
+  }
+
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(esp, arg_count * kPointerSize));
@@ -2091,18 +2169,14 @@
 
   // Push the receiver of the enclosing function.
   __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+  // Push the language mode.
+  __ push(Immediate(Smi::FromInt(language_mode())));
 
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ push(Immediate(Smi::FromInt(strict_mode)));
+  // Push the start position of the scope the call resides in.
+  __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
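kResolvePossiblyDirectEval now takes five arguments and the lookup-free variant is gone. An illustrative mirror of the values pushed above, in order; the struct and field names are invented for exposition, not V8's:

    #include <cstdint>

    struct ResolveEvalArgs {
      intptr_t callee;          // copy of the function found below the arguments
      intptr_t first_argument;  // or undefined when arg_count == 0
      intptr_t receiver;        // receiver of the enclosing function
      int32_t language_mode;    // Smi: classic / strict / extended
      int32_t scope_position;   // Smi: scope()->start_position()
    };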
 
 
@@ -2128,33 +2202,15 @@
       VisitForStackValue(callee);
       // Reserved receiver slot.
       __ push(Immediate(isolate()->factory()->undefined_value()));
-      increment_stack_height();
       // Push the arguments.
       for (int i = 0; i < arg_count; i++) {
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly in
-      // generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(eax);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.
@@ -2164,17 +2220,16 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    decrement_stack_height(arg_count + 1);  // Function is left on the stack.
     context()->DropAndPlug(1, eax);
 
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
     __ push(GlobalObjectOperand());
-    increment_stack_height();
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2193,7 +2248,6 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
-    increment_stack_height(2);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2201,8 +2255,7 @@
       Label call;
       __ jmp(&call, Label::kNear);
       __ bind(&done);
-      // Push function.  Stack height already incremented in slow case
-      // above.
+      // Push function.
       __ push(eax);
       // The receiver is implicitly the global receiver. Indicate this by
       // passing the hole to the call function stub.
@@ -2235,7 +2288,6 @@
     // Load global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-    increment_stack_height();
     // Emit function call.
     EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
@@ -2276,13 +2328,12 @@
   Handle<Code> construct_builtin =
       isolate()->builtins()->JSConstructCall();
   __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
-  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2294,7 +2345,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2302,7 +2353,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2314,7 +2366,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2322,7 +2374,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2346,14 +2399,15 @@
   __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2367,14 +2421,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2390,7 +2445,7 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2398,7 +2453,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2438,9 +2494,9 @@
   STATIC_ASSERT(kPointerSize == 4);
   __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
   // Calculate location of the first key name.
-  __ add(Operand(ebx),
-           Immediate(FixedArray::kHeaderSize +
-                     DescriptorArray::kFirstIndex * kPointerSize));
+  __ add(ebx,
+         Immediate(FixedArray::kHeaderSize +
+                   DescriptorArray::kFirstIndex * kPointerSize));
   // Loop through all the keys in the descriptor array. If one of these is the
   // symbol valueOf the result is false.
   Label entry, loop;
@@ -2449,9 +2505,9 @@
   __ mov(edx, FieldOperand(ebx, 0));
   __ cmp(edx, FACTORY->value_of_symbol());
   __ j(equal, if_false);
-  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ add(ebx, Immediate(kPointerSize));
   __ bind(&entry);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(not_equal, &loop);
 
   // Reload map as register ebx was used as temporary above.
@@ -2475,12 +2531,13 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2494,14 +2551,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2515,14 +2573,15 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2536,7 +2595,7 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2544,8 +2603,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2568,14 +2627,15 @@
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2590,16 +2650,16 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(ebx);
-  decrement_stack_height();
-  __ cmp(eax, Operand(ebx));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ cmp(eax, ebx);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2613,8 +2673,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2636,7 +2696,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2647,20 +2708,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, one at each end of the
+  // type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
   // Map is now in eax.
   __ j(below, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ j(equal, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-  __ j(above_equal, &function);
+  __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ j(equal, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
   __ j(not_equal, &non_function_constructor);
@@ -2692,7 +2757,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2700,12 +2765,12 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
-    decrement_stack_height(2);
   }
   // Finally, we're expected to leave a value on the top of the stack.
   __ mov(eax, isolate()->factory()->undefined_value());
@@ -2713,8 +2778,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2730,9 +2795,10 @@
   __ bind(&heapnumber_allocated);
 
   __ PrepareCallCFunction(1, ebx);
-  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
-                   1);
+  __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+  __ mov(Operand(esp, 0), eax);
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   // by computing:
@@ -2741,8 +2807,8 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-    __ movd(xmm1, Operand(ebx));
-    __ movd(xmm0, Operand(eax));
+    __ movd(xmm1, ebx);
+    __ movd(xmm0, eax);
     __ cvtss2sd(xmm1, xmm1);
     __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
@@ -2763,34 +2829,35 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   VisitForStackValue(args->at(3));
   __ CallStub(&stub);
-  decrement_stack_height(4);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2808,8 +2875,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2820,18 +2888,17 @@
   } else {
     __ CallRuntime(Runtime::kMath_pow, 2);
   }
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
-  decrement_stack_height();
 
   Label done;
   // If the object is a smi, return the value.
@@ -2843,17 +2910,19 @@
 
   // Store the value.
   __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ mov(edx, eax);
-  __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
+  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2861,12 +2930,12 @@
 
   NumberToStringStub stub;
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2884,7 +2953,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2892,18 +2962,15 @@
 
   Register object = ebx;
   Register index = eax;
-  Register scratch = ecx;
   Register result = edx;
 
   __ pop(object);
-  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2932,7 +2999,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2940,20 +3008,17 @@
 
   Register object = ebx;
   Register index = eax;
-  Register scratch1 = ecx;
-  Register scratch2 = edx;
+  Register scratch = edx;
   Register result = eax;
 
   __ pop(object);
-  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2982,7 +3047,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2990,12 +3056,12 @@
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3003,58 +3069,70 @@
 
   StringCompareStub stub;
   __ CallStub(&stub);
-  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3063,31 +3141,43 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
+  __ j(equal, &proxy);
+
   // InvokeFunction requires the function in edi. Move it in there.
   __ mov(edi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(edi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height(arg_count + 1);
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(eax);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(eax);
 }
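
For illustration, the control flow EmitCallFunction now emits, as a plain C++ sketch (Value, InvokeDirect, and RuntimeCall are placeholders, not V8 APIs):

    #include <vector>

    struct Value { bool is_function_proxy = false; };

    static Value InvokeDirect(Value fn, const std::vector<Value>&) {
      return fn;  // stand-in for the InvokeFunction path
    }
    static Value RuntimeCall(Value fn, const std::vector<Value>&) {
      return fn;  // stand-in for the Runtime::kCall path
    }

    // Ordinary functions are invoked directly; function proxies cannot
    // be, so they fall back to the runtime.
    Value CallFunction(Value fn, const std::vector<Value>& args) {
      if (fn.is_function_proxy) return RuntimeCall(fn, args);
      return InvokeDirect(fn, args);
    }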
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3119,14 +3209,14 @@
   __ mov(index_1, Operand(esp, 1 * kPointerSize));
   __ mov(index_2, Operand(esp, 0));
   __ mov(temp, index_1);
-  __ or_(temp, Operand(index_2));
+  __ or_(temp, index_2);
   __ JumpIfNotSmi(temp, &slow_case);
 
   // Check that both indices are valid.
   __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ cmp(temp, Operand(index_1));
+  __ cmp(temp, index_1);
   __ j(below_equal, &slow_case);
-  __ cmp(temp, Operand(index_2));
+  __ cmp(temp, index_2);
   __ j(below_equal, &slow_case);
 
   // Bring addresses into index1 and index2.
@@ -3139,16 +3229,35 @@
   __ mov(Operand(index_2, 0), object);
   __ mov(Operand(index_1, 0), temp);
 
-  Label new_space;
-  __ InNewSpace(elements, temp, equal, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   temp,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   not_zero,
+                   &no_remembered_set,
+                   Label::kNear);
+  // Possible optimization: check that both values are smis
+  // (OR them together and test the result against the smi mask).
 
-  __ mov(object, elements);
-  __ RecordWriteHelper(object, index_1, temp);
-  __ RecordWriteHelper(elements, index_2, temp);
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index_1,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index_2,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
+
   // We are done. Drop elements from the stack, and return undefined.
-  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ add(esp, Immediate(3 * kPointerSize));
   __ mov(eax, isolate()->factory()->undefined_value());
   __ jmp(&done);
 
@@ -3156,12 +3265,12 @@
   __ CallRuntime(Runtime::kSwapElements, 3);
 
   __ bind(&done);
-  decrement_stack_height(3);
   context()->Plug(eax);
 }
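
A minimal model of the barrier logic above, assuming an illustrative page size and flag bit (V8's real page layout lives elsewhere in the tree):

    #include <cstdint>
    #include <unordered_set>

    constexpr std::uint32_t kScanOnScavenge = 1u << 0;  // assumed flag bit

    struct PageHeader { std::uint32_t flags = 0; };
    static std::unordered_set<std::uintptr_t> remembered_set;

    // Mirrors CheckPageFlag + RememberedSetHelper: a store into a page
    // already scheduled for a full scan on scavenge needs no per-slot
    // remembered-set entry, so the fast path skips the insert entirely.
    void RecordSlot(const PageHeader& page, std::uintptr_t slot_addr) {
      if (page.flags & kScanOnScavenge) return;
      remembered_set.insert(slot_addr);
    }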
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3209,7 +3318,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = eax;
@@ -3221,11 +3331,11 @@
   __ pop(left);
 
   Label done, fail, ok;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   __ j(equal, &ok);
   // Fail if either is a non-HeapObject.
   __ mov(tmp, left);
-  __ and_(Operand(tmp), right);
+  __ and_(tmp, right);
   __ JumpIfSmi(tmp, &fail);
   __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@@ -3242,12 +3352,12 @@
   __ mov(eax, Immediate(isolate()->factory()->true_value()));
   __ bind(&done);
 
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3265,14 +3375,15 @@
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3287,11 +3398,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3316,7 +3428,7 @@
   Operand separator_operand = Operand(esp, 2 * kPointerSize);
   Operand result_operand = Operand(esp, 1 * kPointerSize);
   Operand array_length_operand = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
+  __ sub(esp, Immediate(2 * kPointerSize));
   __ cld();
   // Check that the array is a JSArray
   __ JumpIfSmi(array, &bailout);
@@ -3352,7 +3464,7 @@
   // Live loop registers: index, array_length, string,
   //                      scratch, string_length, elements.
   if (FLAG_debug_code) {
-    __ cmp(index, Operand(array_length));
+    __ cmp(index, array_length);
     __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
   }
   __ bind(&loop);
@@ -3370,8 +3482,8 @@
   __ add(string_length,
          FieldOperand(string, SeqAsciiString::kLengthOffset));
   __ j(overflow, &bailout);
-  __ add(Operand(index), Immediate(1));
-  __ cmp(index, Operand(array_length));
+  __ add(index, Immediate(1));
+  __ cmp(index, array_length);
   __ j(less, &loop);
 
   // If array_length is 1, return elements[0], a string.
@@ -3405,10 +3517,10 @@
   // to string_length.
   __ mov(scratch, separator_operand);
   __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
-  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
+  __ sub(string_length, scratch);  // May be negative, temporarily.
   __ imul(scratch, array_length_operand);
   __ j(overflow, &bailout);
-  __ add(string_length, Operand(scratch));
+  __ add(string_length, scratch);
   __ j(overflow, &bailout);
 
   __ shr(string_length, 1);
@@ -3449,7 +3561,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
   __ bind(&loop_1_condition);
   __ cmp(index, array_length_operand);
   __ j(less, &loop_1);  // End while (index < length).
@@ -3490,7 +3602,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_2);  // End while (index < length).
@@ -3531,7 +3643,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_3);  // End while (index < length).
@@ -3543,10 +3655,9 @@
   __ bind(&done);
   __ mov(eax, result_operand);
   // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ add(esp, Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3566,7 +3677,6 @@
     // Prepare for calling JS runtime function.
     __ mov(eax, GlobalObjectOperand());
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
-    increment_stack_height();
   }
 
   // Push the arguments ("left-to-right").
@@ -3588,11 +3698,6 @@
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
-  decrement_stack_height(arg_count);
-  if (expr->is_jsruntime()) {
-    decrement_stack_height();
-  }
-
   context()->Plug(eax);
 }
 
@@ -3607,15 +3712,16 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ push(Immediate(Smi::FromInt(strict_mode_flag)));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        decrement_stack_height(2);
         context()->Plug(eax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
@@ -3657,18 +3763,41 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ mov(eax, isolate()->factory()->true_value());
+        } else {
+          __ push(isolate()->factory()->true_value());
+        }
+        __ jmp(&done, Label::kNear);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ mov(eax, isolate()->factory()->false_value());
+        } else {
+          __ push(isolate()->factory()->false_value());
+        }
+        __ bind(&done);
       }
       break;
     }
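
The swapped-label trick above, reduced to a sketch (the function-pointer plumbing is illustrative):

    using Thunk = void (*)();

    // Branch on the condition, sending control to the given targets.
    void VisitForControl(bool condition, Thunk if_true, Thunk if_false) {
      condition ? if_true() : if_false();
    }

    // Logical NOT without computing a value: branch on the operand with
    // the true and false targets exchanged.
    void VisitNotForControl(bool condition, Thunk if_true, Thunk if_false) {
      VisitForControl(condition, if_false, if_true);  // swapped targets
    }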
@@ -3679,7 +3808,6 @@
         VisitForTypeofValue(expr->expression());
       }
       __ CallRuntime(Runtime::kTypeof, 1);
-      decrement_stack_height();
       context()->Plug(eax);
       break;
     }
@@ -3733,10 +3861,7 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // as the left-hand side.
   if (!expr->expression()->IsValidLeftHandSide()) {
-    ASSERT(expr->expression()->AsThrow() != NULL);
-    VisitInCurrentContext(expr->expression());
-    // Visiting Throw does not plug the context.
-    context()->Plug(eax);
+    VisitForEffect(expr->expression());
     return;
   }
 
@@ -3761,20 +3886,17 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ push(Immediate(Smi::FromInt(0)));
-      increment_stack_height();
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
-      increment_stack_height();
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
-      increment_stack_height();
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -3805,7 +3927,6 @@
       switch (assign_type) {
         case VARIABLE:
           __ push(eax);
-          increment_stack_height();
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -3823,9 +3944,9 @@
 
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(eax, Immediate(Smi::FromInt(1)));
     } else {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(eax, Immediate(Smi::FromInt(1)));
     }
     __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
@@ -3835,9 +3956,9 @@
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(eax, Immediate(Smi::FromInt(1)));
     } else {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(eax, Immediate(Smi::FromInt(1)));
     }
   }
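
A hedged sketch of the inlined increment, assuming 31-bit smis tagged with a low zero bit so that +1 is an integer add of 2 (__builtin_add_overflow is a GCC/Clang builtin):

    #include <cstdint>

    // Returns true on success; on overflow the operand is left unchanged,
    // matching the inline undo before the generic stub call.
    bool SmiIncrement(std::int32_t* tagged) {
      std::int32_t result;
      if (__builtin_add_overflow(*tagged, 2, &result)) {
        return false;  // bail out to the BinaryOpStub slow path
      }
      *tagged = result;
      return true;
    }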
 
@@ -3879,10 +4000,9 @@
     case NAMED_PROPERTY: {
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
-      decrement_stack_height();
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3897,11 +4017,9 @@
     case KEYED_PROPERTY: {
       __ pop(ecx);
       __ pop(edx);
-      decrement_stack_height();
-      decrement_stack_height();
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3950,20 +4068,25 @@
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(eax, if_true);
@@ -3998,8 +4121,11 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(eax, if_false);
-    __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
-    Split(above_equal, if_true, if_false, fall_through);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+    __ j(equal, if_true);
+    __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(eax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4017,18 +4143,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  Split(equal, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
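
The typeof "function" test reduces to membership in the two callable instance types; a sketch with placeholder enum values:

    enum InstanceType { JS_FUNCTION_TYPE = 1, JS_FUNCTION_PROXY_TYPE = 2 };

    // Both ordinary functions and function proxies must report "function",
    // which is what the CmpObjectType/CmpInstanceType pair above checks.
    bool TypeofReportsFunction(InstanceType type) {
      return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
    }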
 
 
@@ -4036,9 +4151,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4046,21 +4164,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
-  switch (expr->op()) {
+  switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      decrement_stack_height(2);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -4069,9 +4179,8 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      decrement_stack_height(2);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ test(eax, Operand(eax));
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ test(eax, eax);
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
       break;
@@ -4084,43 +4193,34 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = equal;
-          __ pop(edx);
           break;
         case Token::LT:
           cc = less;
-          __ pop(edx);
           break;
         case Token::GT:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = less;
-          __ mov(edx, result_register());
-          __ pop(eax);
+          cc = greater;
          break;
         case Token::LTE:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = greater_equal;
-          __ mov(edx, result_register());
-          __ pop(eax);
+          cc = less_equal;
           break;
         case Token::GTE:
           cc = greater_equal;
-          __ pop(edx);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
-      decrement_stack_height();
+      __ pop(edx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         Label slow_case;
-        __ mov(ecx, Operand(edx));
-        __ or_(ecx, Operand(eax));
+        __ mov(ecx, edx);
+        __ or_(ecx, eax);
         patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-        __ cmp(edx, Operand(eax));
+        __ cmp(edx, eax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
@@ -4131,8 +4231,8 @@
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ test(eax, Operand(eax));
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ test(eax, eax);
       Split(cc, if_true, if_false, fall_through);
     }
   }
@@ -4143,7 +4243,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4151,15 +4253,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ cmp(eax, isolate()->factory()->null_value());
-  if (expr->is_strict()) {
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Handle<Object> nil_value = nil == kNullValue ?
+      isolate()->factory()->null_value() :
+      isolate()->factory()->undefined_value();
+  __ cmp(eax, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
+    Handle<Object> other_nil_value = nil == kNullValue ?
+        isolate()->factory()->undefined_value() :
+        isolate()->factory()->null_value();
     __ j(equal, if_true);
-    __ cmp(eax, isolate()->factory()->undefined_value());
+    __ cmp(eax, other_nil_value);
     __ j(equal, if_true);
     __ JumpIfSmi(eax, if_false);
     // It can be an undetectable object.
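
The semantics this code implements, as a compilable sketch (the Kind enum is a stand-in for V8's tagged values; the real undetectable check inspects the map's bit field):

    #include <cassert>

    enum class Kind { kNull, kUndefined, kUndetectable, kOther };

    // Under === only the named nil matches; under sloppy ==, null and
    // undefined match each other and any undetectable object.
    bool CompareNil(Kind v, Kind nil, bool strict) {
      if (v == nil) return true;
      if (strict) return false;
      Kind other = (nil == Kind::kNull) ? Kind::kUndefined : Kind::kNull;
      return v == other || v == Kind::kUndetectable;
    }

    int main() {
      assert(CompareNil(Kind::kUndefined, Kind::kNull, /*strict=*/false));
      assert(!CompareNil(Kind::kUndefined, Kind::kNull, /*strict=*/true));
    }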
@@ -4226,7 +4333,7 @@
   // Cook return address on top of stack (smi encoded Code* delta)
   ASSERT(!result_register().is(edx));
   __ pop(edx);
-  __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+  __ sub(edx, Immediate(masm_->CodeObject()));
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
   __ SmiTag(edx);
@@ -4242,8 +4349,8 @@
   // Uncook return address.
   __ pop(edx);
   __ SmiUntag(edx);
-  __ add(Operand(edx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(edx));
+  __ add(edx, Immediate(masm_->CodeObject()));
+  __ jmp(edx);
 }
 
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 9b5cc56..e93353e 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -212,7 +212,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1);
+  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }
 
 
@@ -326,7 +326,7 @@
   // Fast case: Do the load.
   STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
   __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
+  __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, out_of_range);
@@ -394,8 +394,8 @@
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
-  __ cmp(key, Operand(scratch2));
+  __ sub(scratch2, Immediate(Smi::FromInt(2)));
+  __ cmp(key, scratch2);
   __ j(greater_equal, unmapped_case);
 
   // Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@
   Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
   __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
   __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch));
+  __ cmp(key, scratch);
   __ j(greater_equal, slow_case);
   return FieldOperand(backing_store,
                       key,
@@ -534,7 +534,7 @@
   __ shr(ecx, KeyedLookupCache::kMapHashShift);
   __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
   __ shr(edi, String::kHashShift);
-  __ xor_(ecx, Operand(edi));
+  __ xor_(ecx, edi);
   __ and_(ecx, KeyedLookupCache::kCapacityMask);
 
   // Load the key (consisting of map and symbol) from the cache and
@@ -545,7 +545,7 @@
   __ shl(edi, kPointerSizeLog2 + 1);
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
-  __ add(Operand(edi), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
   __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
 
@@ -559,12 +559,12 @@
   __ mov(edi,
          Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ sub(edi, Operand(ecx));
+  __ sub(edi, ecx);
   __ j(above_equal, &property_array_property);
 
   // Load in-object property.
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(ecx, Operand(edi));
+  __ add(ecx, edi);
   __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
   __ ret(0);
@@ -606,14 +606,12 @@
 
   Register receiver = edx;
   Register index = eax;
-  Register scratch1 = ebx;
-  Register scratch2 = ecx;
+  Register scratch = ecx;
   Register result = eax;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -651,8 +649,8 @@
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
-  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
-  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
   __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
@@ -710,7 +708,7 @@
   __ mov(mapped_location, eax);
   __ lea(ecx, mapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, ecx, edx);
+  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in ebx.
@@ -719,7 +717,7 @@
   __ mov(unmapped_location, eax);
   __ lea(edi, unmapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, edi, edx);
+  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -734,7 +732,9 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, fast, array, extra;
+  Label slow, fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
+  Label check_if_double_array, array, extra;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(edx, &slow);
@@ -750,22 +750,18 @@
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
+  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
-  __ CmpInstanceType(edi, JS_PROXY_TYPE);
-  __ j(equal, &slow);
-  __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // eax: value
   // edx: JSObject
   // ecx: key (a smi)
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
-  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(below, &fast);
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check array bounds; both the key and the FixedArray length are smis.
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(below, &fast_object_with_map_check);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -778,16 +774,28 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  // edi: receiver->elements, a FixedArray
+  // ebx: receiver->elements, a FixedArray
+  // edi: receiver map
   // flags: compare (ecx, edx.length())
   // do not leave holes in the array:
   __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  // Add 1 to receiver->length, and go to fast array write.
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &check_if_double_array);
+  // Add 1 to receiver->length, and go to common element store code for Objects.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
-  __ jmp(&fast);
+  __ jmp(&fast_object_without_map_check);
+
+  __ bind(&check_if_double_array);
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  // Add 1 to receiver->length, and go to common element store code for doubles.
+  __ add(FieldOperand(edx, JSArray::kLengthOffset),
+         Immediate(Smi::FromInt(1)));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -796,34 +804,64 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
 
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
+  // Check the key against the length in the array and fall through to the
+  // common store code.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
   __ j(above_equal, &extra);
 
-  // Fast case: Do the store.
-  __ bind(&fast);
+  // Fast case: Do the store; it could be either an Object or a double.
+  __ bind(&fast_object_with_map_check);
   // eax: value
   // ecx: key (a smi)
   // edx: receiver
-  // edi: FixedArray receiver->elements
-  __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
+  // ebx: FixedArray receiver->elements
+  // edi: receiver map
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(eax, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+  __ ret(0);
+
+  __ bind(&non_smi_value);
+  // Escape to the slow case when writing a non-smi into a smi-only array.
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+  __ CheckFastObjectElements(edi, &slow, Label::kNear);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
   // Update write barrier for the elements array address.
-  __ mov(edx, Operand(eax));
-  __ RecordWrite(edi, 0, edx, ecx);
+  __ mov(edx, eax);  // Preserve the value which is returned.
+  __ RecordWriteArray(
+      ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  __ bind(&fast_double_without_map_check);
+  // If the value is a number, store it as a double in the FastDoubleElements
+  // array.
+  __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
   __ ret(0);
 }
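
A rough model of the store paths above, with stand-in container types (only the dispatch shape is faithful):

    #include <cstddef>
    #include <variant>
    #include <vector>

    struct Smi { int value; };
    struct HeapObject {};
    using Tagged = std::variant<Smi, HeapObject*>;

    void WriteBarrier(std::vector<Tagged>&, std::size_t) {
      // remembered-set update; elided
    }

    // Smi stores skip the barrier entirely; heap-object stores take it.
    void StoreTagged(std::vector<Tagged>& elements, std::size_t i, Tagged v) {
      elements[i] = v;
      if (std::holds_alternative<HeapObject*>(v)) WriteBarrier(elements, i);
    }

    // Numbers destined for a double array are stored unboxed; no barrier,
    // since the slot never holds a pointer.
    void StoreDouble(std::vector<double>& elements, std::size_t i, double v) {
      elements[i] = v;
    }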
 
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- edx                 : receiver
@@ -833,11 +871,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
-  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
-                                                  eax);
+  Isolate* isolate = masm->isolate();
+  isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -863,9 +901,9 @@
 
   // Check for boolean.
   __ bind(&non_string);
-  __ cmp(edx, FACTORY->true_value());
+  __ cmp(edx, isolate->factory()->true_value());
   __ j(equal, &boolean);
-  __ cmp(edx, FACTORY->false_value());
+  __ cmp(edx, isolate->factory()->false_value());
   __ j(not_equal, &miss);
   __ bind(&boolean);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -873,8 +911,7 @@
 
   // Probe the stub cache for the value object.
   __ bind(&probe);
-  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
-                                                  no_reg);
+  isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
   __ bind(&miss);
 }
 
@@ -904,8 +941,9 @@
                     NullCallWrapper(), CALL_AS_METHOD);
 }
 
+
 // The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -929,10 +967,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -951,22 +989,22 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ push(edx);
-  __ push(ecx);
+    // Push the receiver and the name of the function.
+    __ push(edx);
+    __ push(ecx);
 
-  // Call the entry.
-  CEntryStub stub(1);
-  __ mov(eax, Immediate(2));
-  __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
-  __ CallStub(&stub);
+    // Call the entry.
+    CEntryStub stub(1);
+    __ mov(eax, Immediate(2));
+    __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
+    __ CallStub(&stub);
 
-  // Move result to edi and exit the internal frame.
-  __ mov(edi, eax);
-  __ LeaveInternalFrame();
+    // Move result to edi and exit the internal frame.
+    __ mov(edi, eax);
+  }
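
The RAII idiom this hunk switches to, in miniature (the puts calls stand in for the emitted frame setup and teardown):

    #include <cstdio>

    // Frame exit is tied to scope exit, so no code path can leak a frame,
    // unlike the paired Enter/LeaveInternalFrame calls being replaced.
    struct FrameScope {
      FrameScope()  { std::puts("enter internal frame"); }
      ~FrameScope() { std::puts("leave internal frame"); }
    };

    void Example() {
      FrameScope scope;  // enter
      // ... push receiver and name, call the miss entry ...
    }                    // leave, even on an early return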
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -989,7 +1027,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -1003,7 +1041,7 @@
 
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
-                                 Code::ExtraICState extra_ic_state) {
+                                 Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1014,38 +1052,10 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
+                                            extra_state);
 
-  GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+  GenerateMiss(masm, argc, extra_state);
 }
 
 
@@ -1111,13 +1121,17 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  __ EnterInternalFrame();
-  __ push(ecx);  // save the key
-  __ push(edx);  // pass the receiver
-  __ push(ecx);  // pass the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(ecx);  // restore the key
-  __ LeaveInternalFrame();
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(ecx);  // save the key
+    __ push(edx);  // pass the receiver
+    __ push(ecx);  // pass the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(ecx);  // restore the key
+    // Leave the internal frame.
+  }
+
   __ mov(edi, eax);
   __ jmp(&do_call);
 
@@ -1143,10 +1157,8 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  GenerateMonomorphicCacheProbe(masm,
-                                argc,
-                                Code::KEYED_CALL_IC,
-                                Code::kNoExtraICState);
+  CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
+                                            Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1209,25 +1221,12 @@
   __ JumpIfSmi(ecx, &miss);
   Condition cond = masm->IsObjectStringType(ecx, eax, eax);
   __ j(NegateCondition(cond), &miss);
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
 
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -1536,6 +1535,51 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ebx    : target map
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  // Must return the modified receiver in eax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ mov(eax, edx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ebx);  // return address
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ebx    : target map
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  // Must return the modified receiver in eax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ mov(eax, edx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ebx);  // return address
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 #undef __
 
 
@@ -1547,11 +1591,9 @@
     case Token::LT:
       return less;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return less;
+      return greater;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return greater_equal;
+      return less_equal;
     case Token::GTE:
       return greater_equal;
     default:
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d5a4fe6..33adc21 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -33,6 +33,7 @@
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -70,6 +71,17 @@
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope(SSE2);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+  dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
+                             info()->osr_ast_id() != AstNode::kNoNumber;
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -133,7 +145,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). ecx is zero for method calls and non-zero for
   // function calls.
-  if (info_->is_strict_mode() || info_->is_native()) {
+  if (!info_->is_classic_mode() || info_->is_native()) {
     Label ok;
     __ test(ecx, Operand(ecx));
     __ j(zero, &ok, Label::kNear);
@@ -144,6 +156,29 @@
     __ bind(&ok);
   }
 
+  if (dynamic_frame_alignment_) {
+    Label do_not_pad, align_loop;
+    STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+    // Align esp to a multiple of 2 * kPointerSize.
+    __ test(esp, Immediate(kPointerSize));
+    __ j(zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    // Copy arguments, receiver, and return address.
+    __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0),
+           Immediate(isolate()->factory()->frame_alignment_marker()));
+
+    __ bind(&do_not_pad);
+  }
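
A word-level model of the padding loop, where num_words covers the parameters plus receiver and return address (the pointer arithmetic here is illustrative, not the emitted code):

    #include <cstdint>

    // When sp is not double-word aligned, open one slot, slide the return
    // address, receiver, and arguments down into it, and drop a marker in
    // the vacated top slot so the epilogue can detect the padding.
    void AlignFrame(std::uintptr_t** sp_ptr, int num_words,
                    std::uintptr_t marker) {
      std::uintptr_t* sp = *sp_ptr;
      if ((reinterpret_cast<std::uintptr_t>(sp) & sizeof(std::uintptr_t)) == 0)
        return;                                  // already aligned
      --sp;                                      // push one padding word
      for (int i = 0; i < num_words; ++i) sp[i] = sp[i + 1];
      sp[num_words] = marker;                    // frame_alignment_marker
      *sp_ptr = sp;
    }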
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -204,11 +239,12 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use a third register to avoid
-        // clobbering esi.
-        __ mov(ecx, esi);
-        __ RecordWrite(ecx, context_offset, eax, ebx);
+        // Update the write barrier. This clobbers eax and ebx.
+        __ RecordWriteContextSlot(esi,
+                                  context_offset,
+                                  eax,
+                                  ebx,
+                                  kDontSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -252,6 +288,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -302,6 +341,12 @@
 }
 
 
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
 Immediate LCodeGen::ToImmediate(LOperand* op) {
   LConstantOperand* const_op = LConstantOperand::cast(op);
   Handle<Object> literal = chunk_->LookupLiteral(const_op);
@@ -464,14 +509,18 @@
                                        int argc,
                                        LInstruction* instr,
                                        LOperand* context) {
-  ASSERT(context->IsRegister() || context->IsStackSlot());
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
-  } else {
-    // Context is stack slot.
+  } else if (context->IsStackSlot()) {
     __ mov(esi, ToOperand(context));
+  } else if (context->IsConstantOperand()) {
+    Handle<Object> literal =
+        chunk_->LookupLiteral(LConstantOperand::cast(context));
+    LoadHeapObject(esi, Handle<Context>::cast(literal));
+  } else {
+    UNREACHABLE();
   }
 
   __ CallRuntimeSaveDoubles(id);
@@ -643,7 +692,7 @@
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint =
       safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
@@ -1167,8 +1216,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+  Register reg = ToRegister(instr->result());
+  Handle<Object> handle = instr->value();
+  if (handle->IsHeapObject()) {
+    LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+  } else {
+    __ Set(reg, Immediate(handle));
+  }
 }
 
 
@@ -1494,32 +1548,40 @@
 }
 
 
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  if (right->IsConstantOperand()) {
-    __ cmp(ToOperand(left), ToImmediate(right));
-  } else {
-    __ cmp(ToRegister(left), ToOperand(right));
-  }
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  if (instr->is_double()) {
-    // Don't base result on EFLAGS when a NaN is involved. Instead
-    // jump to the false block.
-    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
-  } else {
-    EmitCmpI(left, right);
-  }
-
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  EmitBranch(true_block, false_block, cc);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Don't base result on EFLAGS when a NaN is involved. Instead
+      // jump to the false block.
+      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      if (right->IsConstantOperand()) {
+        __ cmp(ToRegister(left), ToImmediate(right));
+      } else if (left->IsConstantOperand()) {
+        __ cmp(ToOperand(right), ToImmediate(left));
+        // We transposed the operands. Reverse the condition.
+        cc = ReverseCondition(cc);
+      } else {
+        __ cmp(ToRegister(left), ToOperand(right));
+      }
+    }
+    EmitBranch(true_block, false_block, cc);
+  }
 }
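
Sketches of what the two helpers this code leans on plausibly do; the real EvalComparison and ReverseCondition live elsewhere in the tree:

    enum class Cond { kLess, kLessEqual, kGreater, kGreaterEqual, kEqual };

    // Resolve a compare at compile time when both inputs are constants.
    bool EvalComparison(Cond cc, double left, double right) {
      switch (cc) {
        case Cond::kLess:         return left < right;
        case Cond::kLessEqual:    return left <= right;
        case Cond::kGreater:      return left > right;
        case Cond::kGreaterEqual: return left >= right;
        case Cond::kEqual:        return left == right;
      }
      return false;
    }

    // After transposing the cmp operands, flip the condition: a < b is b > a.
    Cond ReverseCondition(Cond cc) {
      switch (cc) {
        case Cond::kLess:         return Cond::kGreater;
        case Cond::kLessEqual:    return Cond::kGreaterEqual;
        case Cond::kGreater:      return Cond::kLess;
        case Cond::kGreaterEqual: return Cond::kLessEqual;
        case Cond::kEqual:        return Cond::kEqual;
      }
      return cc;
    }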
 
 
@@ -1544,23 +1606,33 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ cmp(reg, factory()->null_value());
-  if (instr->is_strict()) {
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Handle<Object> nil_value = instr->nil() == kNullValue ?
+      factory()->null_value() :
+      factory()->undefined_value();
+  __ cmp(reg, nil_value);
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, equal);
   } else {
+    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
+        factory()->undefined_value() :
+        factory()->null_value();
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ cmp(reg, factory()->undefined_value());
+    __ cmp(reg, other_nil_value);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1612,6 +1684,31 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsString(reg, temp, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   Operand input = ToOperand(instr->InputAt(0));
 
@@ -1639,6 +1736,41 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
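+  // The compare IC returns an integer in eax that compares to zero the same
+  // way the original operands compare to each other.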
+  __ test(eax, Operand(eax));
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1712,28 +1844,35 @@
   ASSERT(!input.is(temp));
   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
   __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-  __ j(below, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(above_equal, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+    __ j(below, is_false);
+    __ j(equal, is_true);
+    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+    __ j(equal, is_true);
   } else {
-    __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ j(above, is_false);
   }
 
+  // We are now within the FIRST..LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
-
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1818,9 +1957,8 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1843,7 +1981,9 @@
   Register map = ToRegister(instr->TempAt(0));
   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
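+  // The cached map is held in a global property cell, which keeps the
+  // patched reference visible to the garbage collector.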
+  Handle<JSGlobalPropertyCell> cache_cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ cmp(map, Operand::Cell(cache_cell));  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   __ jmp(&done);
@@ -1909,26 +2049,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -1936,9 +2056,6 @@
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
   Label true_value, done;
   __ test(eax, Operand(eax));
   __ j(condition, &true_value, Label::kNear);
@@ -1962,6 +2079,17 @@
   }
   __ mov(esp, ebp);
   __ pop(ebp);
+  if (dynamic_frame_alignment_) {
+    Label aligned;
+    // The frame alignment marker (padding) sits below the arguments and
+    // the receiver, so its return-address-relative offset is
+    // (num_arguments + 2) words.
+    __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
+           Immediate(factory()->frame_alignment_marker()));
+    __ j(not_equal, &aligned);
+    __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+    __ bind(&aligned);
+  }
   __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
 }
 
@@ -1969,7 +2097,7 @@
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
@@ -1990,20 +2118,27 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register object = ToRegister(instr->TempAt(0));
+  Register address = ToRegister(instr->TempAt(1));
   Register value = ToRegister(instr->InputAt(0));
-  Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+  ASSERT(!value.is(object));
+  Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
+
+  int offset = JSGlobalPropertyCell::kValueOffset;
+  __ mov(object, Immediate(cell_handle));
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  if (instr->hydrogen()->check_hole_value()) {
-    __ cmp(cell_operand, factory()->the_hole_value());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(FieldOperand(object, offset), factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
 
   // Store the value.
-  __ mov(cell_operand, value);
+  __ mov(FieldOperand(object, offset), value);
+  // Cells are always rescanned, so no write barrier here.
 }
 
 
@@ -2013,7 +2148,7 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2031,10 +2166,19 @@
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
   __ mov(ContextOperand(context, instr->slot_index()), value);
-  if (instr->needs_write_barrier()) {
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     Register temp = ToRegister(instr->TempAt(0));
     int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWrite(context, offset, value, temp);
+    __ RecordWriteContextSlot(context,
+                              offset,
+                              value,
+                              temp,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
   }
 }
 
@@ -2055,7 +2199,7 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
   ASSERT(lookup.IsProperty() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2251,16 +2395,14 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result = ToDoubleRegister(instr->result());
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(), instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset);
-    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
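+  // Deoptimize if the element is the hole NaN: the hole must not escape as
+  // an ordinary NaN value.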
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      offset);
+  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2330,6 +2472,7 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -2494,7 +2637,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2647,6 +2790,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2878,6 +3022,14 @@
 }
 
 
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2917,6 +3069,9 @@
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -2968,12 +3123,12 @@
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->function()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ Drop(1);
 }
 
 
@@ -3023,21 +3178,36 @@
   }
 
   // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ mov(FieldOperand(object, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, offset, value, temp);
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          temp,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
     __ mov(FieldOperand(temp, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(temp, offset, value, object);
+      __ RecordWriteField(temp,
+                          offset,
+                          value,
+                          object,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   }
 }
@@ -3049,7 +3219,7 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3096,6 +3266,7 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3112,6 +3283,13 @@
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    __ test(value, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  }
+
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3128,13 +3306,21 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key,
            FieldOperand(elements,
                         key,
                         times_pointer_size,
                         FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
   }
 }
 
@@ -3165,99 +3351,75 @@
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));
 
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
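+  // Nothing to do if the object is not in the expected source map.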
+  Label not_applicable;
+  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable);
+  __ mov(new_map_reg, to_map);
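+  // A smi-only to fast transition only changes the map; the elements backing
+  // store keeps its layout, so the map can be swapped in place.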
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+    // Write barrier.
+    ASSERT_NE(instr->temp_reg(), NULL);
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(edx));
+    ASSERT(new_map_reg.is(ebx));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(edx));
+    ASSERT(new_map_reg.is(ebx));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ test(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ test(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ SmiUntag(result);
-  __ add(index, Operand(result));
-  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
-         Immediate(factory()->empty_string()));
-  __ j(not_equal, deferred->entry());
-  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result, Immediate(kStringRepresentationMask));
-  __ j(not_zero, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzx_w(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ movzx_b(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
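+  // The shared StringCharLoadGenerator emits the flat-string fast paths and
+  // bails out to the deferred code for strings it cannot handle inline.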
+  StringCharLoadGenerator::Generate(masm(),
+                                    factory(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
 
@@ -3300,6 +3462,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3379,6 +3542,7 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -3446,6 +3610,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3547,16 +3712,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3589,8 +3744,7 @@
       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
       __ j(less, &convert, Label::kNear);
       // Pop FPU stack before deoptimizing.
-      __ ffree(0);
-      __ fincstp();
+      __ fstp(0);
       DeoptimizeIf(no_condition, instr->environment());
 
       // Reserve space for 64 bit answer.
@@ -3638,6 +3792,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3840,7 +4004,7 @@
     } else {
       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
       __ and_(temp, mask);
-      __ cmpb(Operand(temp), tag);
+      __ cmp(temp, tag);
       DeoptimizeIf(not_equal, instr->environment());
     }
   }
@@ -3848,9 +4012,16 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Operand operand = ToOperand(instr->InputAt(0));
-  __ cmp(operand, instr->hydrogen()->target());
+  Handle<JSFunction> target = instr->hydrogen()->target();
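+  // A new-space target may move during GC, so it cannot be embedded in the
+  // code as an immediate; compare through a global property cell instead.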
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Register reg = ToRegister(instr->value());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ cmp(reg, Operand::Cell(cell));
+  } else {
+    Operand operand = ToOperand(instr->value());
+    __ cmp(operand, instr->hydrogen()->target());
+  }
   DeoptimizeIf(not_equal, instr->environment());
 }
 
@@ -3953,11 +4124,17 @@
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
+
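+  // The constant elements fixed array is a pair of (elements kind, elements).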
+  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
   // Set up the parameters to the stub/runtime call.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(instr->hydrogen()->constant_elements()));
+  __ push(Immediate(constant_elements));
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -3973,20 +4150,97 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        constant_elements_kind == FAST_DOUBLE_ELEMENTS
+        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(ecx));
+  ASSERT(!result.is(ecx));
+
+  if (FLAG_debug_code) {
+    LoadHeapObject(ecx, object);
+    __ cmp(source, ecx);
+    __ Assert(equal, "Unexpected object literal boilerplate");
+  }
+
+  // Increase the offset so that subsequent objects end up right after
+  // this one.
+  int current_offset = *offset;
+  int size = object->map()->instance_size();
+  *offset += size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  ASSERT(object->elements()->length() == 0 ||
+         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    __ mov(ecx, FieldOperand(source, i));
+    __ mov(FieldOperand(result, current_offset + i), ecx);
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ lea(ecx, Operand(result, *offset));
+      __ mov(FieldOperand(result, total_offset), ecx);
+      LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+      __ mov(FieldOperand(result, total_offset), ecx);
+    } else {
+      __ mov(FieldOperand(result, total_offset), Immediate(value));
+    }
+  }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(Immediate(Smi::FromInt(size)));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
   // Set up the parameters to the stub/runtime call.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(instr->hydrogen()->constant_properties()));
+  __ push(Immediate(constant_properties));
   int flags = instr->hydrogen()->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -3995,11 +4249,16 @@
       : ObjectLiteral::kNoFlags;
   __ push(Immediate(Smi::FromInt(flags)));
 
-  // Pick the right runtime function to call.
+  // Pick the right runtime function or stub to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
@@ -4072,8 +4331,7 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(shared_info->language_mode());
     __ push(Immediate(shared_info));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -4105,12 +4363,11 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
-                                                  input,
-                                                  instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
+  Condition final_branch_condition =
+      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+  if (final_branch_condition != no_condition) {
+    EmitBranch(true_block, false_block, final_branch_condition);
+  }
 }
 
 
@@ -4154,10 +4411,12 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
-    final_branch_condition = above_equal;
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4175,11 +4434,8 @@
     final_branch_condition = zero;
 
   } else {
-    final_branch_condition = not_equal;
     __ jmp(false_label);
-    // A dead branch instruction will be generated after this point.
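+    // final_branch_condition is left as no_condition, so the caller emits
+    // no branch after the unconditional jump above.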
   }
-
   return final_branch_condition;
 }
 
@@ -4281,6 +4537,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index d955450..9d1a4f7 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -58,6 +58,7 @@
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
+        dynamic_frame_alignment_(false),
         deferred_(8),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
@@ -130,8 +131,12 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+  }
+  bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
+  void set_dynamic_frame_alignment(bool value) {
+    dynamic_frame_alignment_ = value;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -222,6 +227,7 @@
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
   int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                 LOperand* key,
                                 ElementsKind elements_kind,
@@ -235,6 +241,7 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -253,7 +260,6 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         XMMRegister result,
                         bool deoptimize_on_undefined,
@@ -262,8 +268,10 @@
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -273,6 +281,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
@@ -281,6 +296,14 @@
                                        Register object,
                                        Handle<Map> type,
                                        Handle<String> name);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   void EnsureSpaceForLazyDeopt();
 
   LChunk* const chunk_;
@@ -295,6 +318,7 @@
   int inlined_function_count_;
   Scope* const scope_;
   Status status_;
+  bool dynamic_frame_alignment_;
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
@@ -338,16 +362,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
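+  // Returns the instruction this deferred code was generated for.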
+  virtual LInstruction* instr() = 0;
 
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -358,6 +386,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 3dc220d..4e5f278 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -110,22 +110,17 @@
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -214,10 +209,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -229,6 +225,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -243,6 +246,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(1)->PrintTo(stream);
+  InputAt(2)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -351,7 +362,11 @@
 
 int LChunk::GetNextSpillIndex(bool is_double) {
   // Skip a slot when allocating a double-width slot.
-  if (is_double) spill_slot_count_++;
+  if (is_double) {
+    spill_slot_count_ |= 1;  // Make it odd, so incrementing makes it even.
+    spill_slot_count_++;
+    num_double_slots_++;
+  }
   return spill_slot_count_++;
 }
 
@@ -447,8 +462,14 @@
 }
 
 
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new LInstructionGap(block);
+  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
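+  // Lithium objects are placement-allocated in the compilation zone and
+  // released in bulk when the zone dies.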
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -523,7 +544,7 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new LChunk(info(), graph());
+  chunk_ = new(zone()) LChunk(info(), graph());
   HPhase phase("Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -559,14 +580,14 @@
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new LUnallocated(LUnallocated::FIXED_REGISTER,
-                          Register::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                          XMMRegister::ToAllocationIndex(reg));
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  XMMRegister::ToAllocationIndex(reg));
 }
 
 
@@ -581,30 +602,30 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                              LUnallocated::USED_AT_START));
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new LUnallocated(LUnallocated::NONE,
-                                     LUnallocated::USED_AT_START));
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
 }
 
 
@@ -639,7 +660,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new LUnallocated(LUnallocated::ANY));
+      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -664,14 +685,15 @@
 
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::NONE));
+  return Define(instr, new(zone()) LUnallocated(LUnallocated::NONE));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
@@ -679,14 +701,16 @@
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr,
     int index) {
-  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -707,7 +731,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -737,7 +763,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -749,7 +775,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -766,13 +793,14 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new LPointerMap(position_));
+  instr->set_pointer_map(new(zone()) LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
   allocator_->RecordTemporary(operand);
   return operand;
 }
@@ -793,40 +821,17 @@
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new LLabel(instr->block());
+  return new(zone()) LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* context = UseFixed(instr->context(), esi);
-    LOperand* left = UseFixed(instr->left(), edx);
-    LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, context, left, right);
-    return MarkAsCall(DefineFixed(result, eax), instr);
-  }
+  return AssignEnvironment(new(zone()) LDeoptimize);
 }
 
 
@@ -839,7 +844,7 @@
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* left = UseFixed(instr->left(), edx);
     LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new LArithmeticT(op, context, left, right);
+    LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 
@@ -873,7 +878,7 @@
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+      DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -886,7 +891,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new LArithmeticD(op, left, right);
+  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
   return DefineSameAsFirst(result);
 }
 
@@ -906,7 +911,7 @@
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
   LArithmeticT* result =
-      new LArithmeticT(op, context, left_operand, right_operand);
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -994,20 +999,23 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
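+  // Argument indices are numbered consecutively across the whole chain of
+  // environments, so the accumulator is threaded through the recursion.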
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
-                                          ast_id,
-                                          hydrogen_env->parameter_count(),
-                                          argument_count_,
-                                          value_count,
-                                          outer);
-  int argument_index = 0;
+  LEnvironment* result =
+      new(zone()) LEnvironment(hydrogen_env->closure(),
+                               ast_id,
+                               hydrogen_env->parameter_count(),
+                               argument_count_,
+                               value_count,
+                               outer);
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1016,7 +1024,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new(zone()) LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
@@ -1028,7 +1036,7 @@
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new LGoto(instr->FirstSuccessor()->block_id());
+  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
@@ -1040,7 +1048,7 @@
     HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new LGoto(successor->block_id());
+    return new(zone()) LGoto(successor->block_id());
   }
   ToBooleanStub::Types expected = instr->expected_input_types();
   // We need a temporary register when we have to access the map *or* we have
@@ -1048,24 +1056,24 @@
   // involving maps).
   bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
   LOperand* temp = needs_temp ? TempRegister() : NULL;
-  return AssignEnvironment(new LBranch(UseRegister(v), temp));
+  return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
 }
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
-  return new LCmpMapAndBranch(value);
+  return new(zone()) LCmpMapAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new LArgumentsElements);
+  return DefineAsRegister(new(zone()) LArgumentsElements);
 }
 
 
@@ -1073,7 +1081,7 @@
   LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
   LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
   LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new LInstanceOf(context, left, right);
+  LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1081,7 +1089,7 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(
+      new(zone()) LInstanceOfKnownGlobal(
           UseFixed(instr->context(), esi),
           UseFixed(instr->left(), InstanceofStub::left()),
           FixedTemp(edi));
@@ -1095,11 +1103,11 @@
   LOperand* length = UseFixed(instr->length(), ebx);
   LOperand* elements = UseFixed(instr->elements(), ecx);
   LOperand* temp = FixedTemp(edx);
-  LApplyArguments* result = new LApplyArguments(function,
-                                                receiver,
-                                                length,
-                                                elements,
-                                                temp);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements,
+                                                        temp);
   return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1107,42 +1115,44 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = UseAny(instr->argument());
-  return new LPushArgument(argument);
+  return new(zone()) LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LOuterContext(context));
+  return DefineAsRegister(new(zone()) LOuterContext(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalObject(context));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LGlobalReceiver(global_object));
+  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
 }
 
 
@@ -1150,7 +1160,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new LInvokeFunction(context, function);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1162,17 +1172,20 @@
     ASSERT(instr->value()->representation().IsDouble());
     LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     return DefineSameAsFirst(result);
   } else if (op == kMathSin || op == kMathCos) {
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
-    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
+    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
+                                                                  input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1197,7 +1210,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseFixed(instr->key(), ecx);
   argument_count_ -= instr->argument_count();
-  LCallKeyed* result = new LCallKeyed(context, key);
+  LCallKeyed* result = new(zone()) LCallKeyed(context, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1205,7 +1218,7 @@
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallNamed* result = new LCallNamed(context);
+  LCallNamed* result = new(zone()) LCallNamed(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1213,14 +1226,14 @@
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallGlobal* result = new LCallGlobal(context);
+  LCallGlobal* result = new(zone()) LCallGlobal(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
 }
 
 
@@ -1228,15 +1241,16 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* constructor = UseFixed(instr->constructor(), edi);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new LCallNew(context, constructor);
+  LCallNew* result = new(zone()) LCallNew(context, constructor);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new LCallFunction(context);
+  LCallFunction* result = new(zone()) LCallFunction(context, function);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1244,7 +1258,7 @@
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
 }
 
 
@@ -1263,8 +1277,26 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
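+    // Keep the (more) constant operand on the right so that it can be
+    // used as an immediate; the left operand has to be in a register.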
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
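+    // Tagged operands go through the generic binary operation stub, which
+    // expects its arguments in edx and eax and returns its result in eax.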
+    LOperand* context = UseFixed(instr->context(), esi);
+    LOperand* left = UseFixed(instr->left(), edx);
+    LOperand* right = UseFixed(instr->right(), eax);
+    LArithmeticT* result =
+        new(zone()) LArithmeticT(instr->op(), context, left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
 }
 
 
@@ -1272,21 +1304,11 @@
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LBitNotI* result = new LBitNotI(input);
+  LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1296,7 +1318,7 @@
     LOperand* temp = FixedTemp(edx);
     LOperand* dividend = UseFixed(instr->left(), eax);
     LOperand* divisor = UseRegister(instr->right());
-    LDivI* result = new LDivI(dividend, divisor, temp);
+    LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1314,7 +1336,8 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+      LModI* mod =
+          new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
       result = DefineSameAsFirst(mod);
     } else {
       // The temporary operand is necessary to ensure that right is
@@ -1322,7 +1345,7 @@
       LOperand* temp = FixedTemp(edx);
       LOperand* value = UseFixed(instr->left(), eax);
       LOperand* divisor = UseRegister(instr->right());
-      LModI* mod = new LModI(value, divisor, temp);
+      LModI* mod = new(zone()) LModI(value, divisor, temp);
       result = DefineFixed(mod, edx);
     }
 
@@ -1339,7 +1362,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), xmm2);
     LOperand* right = UseFixedDouble(instr->right(), xmm1);
-    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   }
 }
@@ -1355,7 +1378,7 @@
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
       temp = TempRegister();
     }
-    LMulI* mul = new LMulI(left, right, temp);
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
     return AssignEnvironment(DefineSameAsFirst(mul));
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
@@ -1372,7 +1395,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new LSubI(left, right);
+    LSubI* sub = new(zone()) LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1393,7 +1416,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new LAddI(left, right);
+    LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineSameAsFirst(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1418,21 +1441,19 @@
   LOperand* right = exponent_type.IsDouble() ?
       UseFixedDouble(instr->right(), xmm2) :
       UseFixed(instr->right(), eax);
-  LPower* result = new LPower(left, right);
+  LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  bool reversed = (op == Token::GT || op == Token::LTE);
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
-  LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
-  LCmpT* result = new LCmpT(context, left, right);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1443,16 +1464,23 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    return new(zone()) LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
-    return new LCmpIDAndBranch(left, right);
+    LOperand* left;
+    LOperand* right;
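+    // If both operands are constant the comparison can be evaluated
+    // statically at code generation time, so constants are acceptable;
+    // otherwise the double compare needs both inputs in registers.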
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      left = UseRegisterOrConstantAtStart(instr->left());
+      right = UseRegisterOrConstantAtStart(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
+    return new(zone()) LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1461,49 +1489,73 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseAtStart(instr->right());
-  return new LCmpObjectEqAndBranch(left, right);
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
   HCompareConstantEqAndBranch* instr) {
-  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+  return new(zone()) LCmpConstantEqAndBranch(
+      UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   // We only need a temp register for non-strict compare.
-  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
-  return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
+  return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsSmiAndBranch(Use(instr->value()));
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
-                                      TempRegister());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+
+  return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
-                                       TempRegister());
+  return new(zone()) LHasInstanceTypeAndBranch(
+      UseRegisterAtStart(instr->value()),
+      TempRegister());
 }
 
 
@@ -1512,14 +1564,14 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LGetCachedArrayIndex(value));
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LHasCachedArrayIndexAndBranch(
+  return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
 
@@ -1527,7 +1579,7 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
-                                   TempRegister(),
-                                   TempRegister());
+  return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                           TempRegister(),
+                                           TempRegister());
 }
@@ -1535,32 +1587,32 @@
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LJSArrayLength(array));
+  return DefineAsRegister(new(zone()) LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LFixedArrayBaseLength(array));
+  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LElementsKind(object));
+  return DefineAsRegister(new(zone()) LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new LValueOf(object, TempRegister());
+  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
   return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new LBoundsCheck(
+  return AssignEnvironment(new(zone()) LBoundsCheck(
       UseRegisterOrConstantAtStart(instr->index()),
       UseAtStart(instr->length())));
 }
@@ -1576,7 +1628,7 @@
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), eax);
-  return MarkAsCall(new LThrow(context, value), instr);
+  return MarkAsCall(new(zone()) LThrow(context, value), instr);
 }
 
 
@@ -1599,7 +1651,7 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new LNumberUntagD(value);
+      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1611,10 +1663,10 @@
             (truncating && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
-        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
-        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
       }
     }
   } else if (from.IsDouble()) {
@@ -1624,7 +1676,7 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new LNumberTagD(value, temp);
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
@@ -1633,21 +1685,23 @@
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
-      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new LSmiTag(value));
+        return DefineSameAsFirst(new(zone()) LSmiTag(value));
       } else {
-        LNumberTagI* result = new LNumberTagI(value);
+        LNumberTagI* result = new(zone()) LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
-      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+      return DefineAsRegister(
+          new(zone()) LInteger32ToDouble(Use(instr->value())));
     }
   }
   UNREACHABLE();
@@ -1657,40 +1711,46 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckNonSmi(value));
+  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  LCheckInstanceType* result = new LCheckInstanceType(value, temp);
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value));
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new LCheckFunction(value));
+  // If the target is in new space, we'll emit a global cell compare and so
+  // want the value in a register.  If the target gets promoted before we
+  // emit code, we will still get the register but will do an immediate
+  // compare instead of the cell compare.  This is safe.
+  LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
+      ? UseRegisterAtStart(instr->value())
+      : UseAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new LCheckMap(value);
+  LCheckMap* result = new(zone()) LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1700,17 +1760,17 @@
   Representation input_rep = value->representation();
   if (input_rep.IsDouble()) {
     LOperand* reg = UseRegister(value);
-    return DefineAsRegister(new LClampDToUint8(reg));
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
   } else if (input_rep.IsInteger32()) {
     LOperand* reg = UseFixed(value, eax);
-    return DefineFixed(new LClampIToUint8(reg), eax);
+    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
   } else {
     ASSERT(input_rep.IsTagged());
     LOperand* reg = UseFixed(value, eax);
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
     LOperand* temp = FixedTemp(xmm1);
-    LClampTToUint8* result = new LClampTToUint8(reg, temp);
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   }
 }
@@ -1725,7 +1785,7 @@
     LOperand* reg = UseRegister(value);
     LOperand* temp_reg =
         CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
-    result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+    result = DefineAsRegister(new(zone()) LDoubleToI(reg, temp_reg));
   } else if (input_rep.IsInteger32()) {
     // Canonicalization should already have removed the hydrogen instruction in
     // this case, since it is a noop.
@@ -1738,29 +1798,29 @@
     // temps. Reserve xmm1 explicitly.
     LOperand* xmm_temp =
         CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
-    result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+    result = DefineSameAsFirst(new(zone()) LTaggedToI(reg, xmm_temp));
   }
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new LReturn(UseFixed(instr->value(), eax));
+  return new(zone()) LReturn(UseFixed(instr->value(), eax));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new LConstantI);
+    return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
     LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
         ? TempRegister()
         : NULL;
-    return DefineAsRegister(new LConstantD(temp));
+    return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new LConstantT);
+    return DefineAsRegister(new(zone()) LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1769,8 +1829,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1779,15 +1839,18 @@
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* global_object = UseFixed(instr->global_object(), eax);
-  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
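+  // The two temporaries are needed by the write barrier when storing
+  // into the global property cell.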
   LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
-  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+      new(zone()) LStoreGlobalCell(UseTempRegister(instr->value()),
+                                   TempRegister(),
+                                   TempRegister());
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1796,38 +1859,36 @@
   LOperand* global_object = UseFixed(instr->global_object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
   LStoreGlobalGeneric* result =
-      new LStoreGlobalGeneric(context, global_object, value);
+      new(zone()) LStoreGlobalGeneric(context, global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadContextSlot(context));
+  return DefineAsRegister(new(zone()) LLoadContextSlot(context));
 }
 
 
 LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
-  LOperand* context;
   LOperand* value;
   LOperand* temp;
+  LOperand* context = UseRegister(instr->context());
   if (instr->NeedsWriteBarrier()) {
-    context = UseTempRegister(instr->context());
     value = UseTempRegister(instr->value());
     temp = TempRegister();
   } else {
-    context = UseRegister(instr->context());
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  return new LStoreContextSlot(context, value, temp);
+  return new(zone()) LStoreContextSlot(context, value, temp);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new LLoadNamedField(obj));
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
 }
 
 
@@ -1838,12 +1899,12 @@
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), eax);
     LLoadNamedFieldPolymorphic* result =
-        new LLoadNamedFieldPolymorphic(context, obj);
+        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
     LLoadNamedFieldPolymorphic* result =
-        new LLoadNamedFieldPolymorphic(context, obj);
+        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1852,7 +1913,7 @@
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseFixed(instr->object(), eax);
-  LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1860,21 +1921,21 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new LLoadFunctionPrototype(UseRegister(instr->function()),
-                                 TempRegister())));
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+                                         TempRegister())));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadElements(input));
+  return DefineAsRegister(new(zone()) LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
 }
 
 
@@ -1884,7 +1945,7 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1896,7 +1957,7 @@
   LOperand* elements = UseRegisterAtStart(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new LLoadKeyedFastDoubleElement(elements, key);
+      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1916,7 +1977,7 @@
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer,
-                                            key);
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
+                                                    key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
@@ -1932,7 +1993,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* key = UseFixed(instr->key(), eax);
 
-  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
+  LLoadKeyedGeneric* result =
+      new(zone()) LLoadKeyedGeneric(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1944,7 +2006,7 @@
   ASSERT(instr->object()->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
 
-  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* obj = UseRegister(instr->object());
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
       : UseRegisterAtStart(instr->value());
@@ -1952,7 +2014,7 @@
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
 
-  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+  return AssignEnvironment(new(zone()) LStoreKeyedFastElement(obj, key, val));
 }
 
 
@@ -1966,7 +2028,7 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new LStoreKeyedFastDoubleElement(elements, key, val);
+  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
@@ -1996,9 +2058,9 @@
     val = UseRegister(instr->value());
   }
 
-  return new LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                key,
-                                                val);
+  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                        key,
+                                                        val);
 }
 
 
@@ -2013,17 +2075,45 @@
   ASSERT(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
-      new LStoreKeyedGeneric(context, object, key, value);
+      new(zone()) LStoreKeyedGeneric(context, object, key, value);
   return MarkAsCall(result, instr);
 }
 
 
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
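+  // A FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS transition only has to
+  // install the new map, so it can be generated in place; all other
+  // transitions are generated as calls.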
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), eax);
+    LOperand* fixed_object_reg = FixedTemp(edx);
+    LOperand* new_map_reg = FixedTemp(ebx);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object,
+                                            new_map_reg,
+                                            fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
-  LOperand* obj = needs_write_barrier
-      ? UseTempRegister(instr->object())
-      : UseRegisterAtStart(instr->object());
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = instr->is_in_object()
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = UseRegisterAtStart(instr->object());
+  }
 
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
@@ -2035,7 +2125,7 @@
       ? TempRegister()
       : NULL;
 
-  return new LStoreNamedField(obj, val, temp);
+  return new(zone()) LStoreNamedField(obj, val, temp);
 }
 
 
@@ -2044,7 +2134,8 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
 
-  LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
+  LStoreNamedGeneric* result =
+      new(zone()) LStoreNamedGeneric(context, object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2053,7 +2144,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseOrConstantAtStart(instr->left());
   LOperand* right = UseOrConstantAtStart(instr->right());
-  LStringAdd* string_add = new LStringAdd(context, left, right);
+  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
   return MarkAsCall(DefineFixed(string_add, eax), instr);
 }
 
@@ -2062,7 +2153,8 @@
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
   LOperand* context = UseAny(instr->context());
-  LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
@@ -2070,38 +2162,51 @@
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
   LOperand* context = UseAny(instr->context());
-  LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new LStringLength(string));
+  return DefineAsRegister(new(zone()) LStringLength(string));
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LObjectLiteralFast(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+    HObjectLiteralGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LObjectLiteralGeneric(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
 }
 
 
@@ -2109,7 +2214,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseAtStart(instr->object());
   LOperand* key = UseOrConstantAtStart(instr->key());
-  LDeleteProperty* result = new LDeleteProperty(context, object, key);
+  LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2117,13 +2222,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new LOsrEntry);
+  return AssignEnvironment(new(zone()) LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new LParameter, spill_index);
+  return DefineAsSpilled(new(zone()) LParameter, spill_index);
 }
 
 
@@ -2133,14 +2238,14 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallStub* result = new LCallStub(context);
+  LCallStub* result = new(zone()) LCallStub(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2158,14 +2263,15 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result =
+      new(zone()) LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new LToFastProperties(object);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2173,19 +2279,19 @@
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseAtStart(instr->value());
-  LTypeof* result = new LTypeof(context, value);
+  LTypeof* result = new(zone()) LTypeof(context, value);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new LIsConstructCallAndBranch(TempRegister());
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2209,7 +2315,7 @@
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
     ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
-    LLazyBailout* lazy_bailout = new LLazyBailout;
+    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2224,11 +2330,12 @@
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
-    return MarkAsCall(new LStackCheck(context), instr);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
     ASSERT(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
-    return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
   }
 }
 
@@ -2257,7 +2364,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseOrConstantAtStart(instr->key());
   LOperand* object = UseOrConstantAtStart(instr->object());
-  LIn* result = new LIn(context, key, object);
+  LIn* result = new(zone()) LIn(context, key, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index b0ab6b4..5170647 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -101,10 +101,12 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -126,7 +128,8 @@
   V(NumberTagD)                                 \
   V(NumberTagI)                                 \
   V(NumberUntagD)                               \
-  V(ObjectLiteral)                              \
+  V(ObjectLiteralFast)                          \
+  V(ObjectLiteralGeneric)                       \
   V(OsrEntry)                                   \
   V(OuterContext)                               \
   V(Parameter)                                  \
@@ -156,6 +159,7 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
@@ -191,8 +195,8 @@
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -288,9 +292,6 @@
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -605,17 +606,18 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNullAndBranch(LOperand* value, LOperand* temp) {
+  LIsNilAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -634,6 +636,19 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -661,6 +676,24 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -787,18 +820,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -1220,10 +1250,12 @@
 };
 
 
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
  public:
-  explicit LStoreGlobalCell(LOperand* value) {
+  LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1248,7 +1280,7 @@
   LOperand* global_object() { return InputAt(1); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(2); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1282,7 +1314,6 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
-  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1299,7 +1330,9 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1412,17 +1445,19 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LCallFunction(LOperand* context) {
+  LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
+    inputs_[1] = function;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
   LOperand* context() { return inputs_[0]; }
-  int arity() const { return hydrogen()->argument_count() - 2; }
+  LOperand* function() { return inputs_[1]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
@@ -1604,7 +1639,6 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
-  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1626,7 +1660,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1716,7 +1750,31 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
 };
 
 
@@ -1788,6 +1846,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1900,16 +1960,29 @@
 };
 
 
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LObjectLiteral(LOperand* context) {
+  explicit LObjectLiteralFast(LOperand* context) {
     inputs_[0] = context;
   }
 
   LOperand* context() { return inputs_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LObjectLiteralGeneric(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
 };
 
 
@@ -2060,6 +2133,7 @@
       graph_(graph),
       instructions_(32),
       pointer_maps_(8),
+      num_double_slots_(0),
       inlined_closures_(1) { }
 
   void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2073,6 +2147,8 @@
   int ParameterAt(int index);
   int GetParameterStackSlot(int index) const;
   int spill_slot_count() const { return spill_slot_count_; }
+  int num_double_slots() const { return num_double_slots_; }
+
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
   const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2114,6 +2190,7 @@
   HGraph* const graph_;
   ZoneList<LInstruction*> instructions_;
   ZoneList<LPointerMap*> pointer_maps_;
+  int num_double_slots_;
   ZoneList<Handle<JSFunction> > inlined_closures_;
 };
 
@@ -2124,6 +2201,7 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
+        isolate_(graph->isolate()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2153,6 +2231,7 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
+  Zone* zone() { return isolate_->zone(); }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2249,12 +2328,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2264,6 +2343,7 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
+  Isolate* isolate_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ce6d6a6..fcae7a2 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -44,7 +44,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -52,33 +53,75 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::InNewSpace(
+    Register object,
+    Register scratch,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == equal || cc == not_equal);
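+  // Mask the object address down to the start of its page; whether the
+  // page is in new space is recorded in the page's flags word.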
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
   }
+  // Check that we can use a test_b.
+  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+           | (1 << MemoryChunk::IN_TO_SPACE);
+  // If non-zero, the page belongs to new-space.
+  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+         static_cast<uint8_t>(mask));
+  j(cc, condition_met, condition_met_distance);
+}
 
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, ~Page::kPageAlignmentMask);
 
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shr(addr, Page::kRegionSizeLog2);
-  and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
-
-  // Set dirty mark for region.
-  // Bit tests with a memory operand should be avoided on Intel processors,
-  // as they usually have long latency and multiple uops. We load the bit base
-  // operand to a register at first and store it back after bit set.
-  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
-  bts(Operand(scratch), addr);
-  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+void MacroAssembler::RememberedSetHelper(
+    Register object,  // Only used for debug checks.
+    Register addr,
+    Register scratch,
+    SaveFPRegsMode save_fp,
+    MacroAssembler::RememberedSetFinalAction and_then) {
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(scratch, Operand::StaticVariable(store_buffer));
+  // Store pointer to buffer.
+  mov(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  add(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  mov(Operand::StaticVariable(store_buffer), scratch);
+  // Call stub on end of buffer.
+  // Check for end of buffer.
+  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
 }
 
 
@@ -112,100 +155,144 @@
 }
 
 
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch,
-                                Label::Distance branch_near) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch),
-         Immediate(ExternalReference::new_space_mask(isolate())));
-    cmp(Operand(scratch),
-        Immediate(ExternalReference::new_space_start(isolate())));
-    j(cc, branch, branch_near);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start(isolate()).address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, isolate()->heap()->NewSpaceMask());
-    j(cc, branch, branch_near);
+void MacroAssembler::RecordWriteArray(Register object,
+                                      Register value,
+                                      Register index,
+                                      SaveFPRegsMode save_fp,
+                                      RememberedSetAction remembered_set_action,
+                                      SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    test(value, Immediate(kSmiTagMask));
+    j(zero, &done);
+  }
+
+  // Array access: calculate the destination address in the same manner as
+  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
+  // into an array of words.
+  Register dst = index;
+  lea(dst, Operand(object, index, times_half_pointer_size,
+                   FixedArray::kHeaderSize - kHeapObjectTag));
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber the clobbered input registers when the debug-code flag is
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
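
The lea above folds the smi-tagged index directly into the slot address; in C++ terms (the header size here is an assumption), the smi's built-in factor of two supplies half of the pointer scaling:

    #include <cstdint>

    const int kPointerSize = 4;           // ia32
    const int kHeapObjectTag = 1;
    const int kFixedArrayHeaderSize = 8;  // assumed FixedArray::kHeaderSize

    uintptr_t ElementSlot(uintptr_t tagged_array, int32_t smi_index) {
      // smi_index == index << 1, so scaling by kPointerSize / 2 yields
      // index * kPointerSize (times_half_pointer_size in the lea above).
      return tagged_array + smi_index * (kPointerSize / 2) +
             kFixedArrayHeaderSize - kHeapObjectTag;
    }
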
 
 
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register scratch) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
+  // catch stores of Smis.
   Label done;
 
   // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
-
-  InNewSpace(object, value, equal, &done, Label::kNear);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = scratch;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // Array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
-    // into an array of words.
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kSmiTag == 0);
-    lea(dst, Operand(object, dst, times_half_pointer_size,
-                     FixedArray::kHeaderSize - kHeapObjectTag));
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done, Label::kNear);
   }
-  RecordWriteHelper(object, dst, value);
+
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    test_b(dst, (1 << kPointerSizeLog2) - 1);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber the clobbered input registers when the debug-code flag is
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
 
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmp(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
 
-  // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done, Label::kNear);
+  }
 
-  InNewSpace(object, value, equal, &done);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
 
-  RecordWriteHelper(object, address, value);
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber the clobbered registers when the debug-code flag is
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(address, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
   }
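
The two CheckPageFlag tests are the barrier's fast-path filter. Using the same page-masking trick as the InNewSpace sketch earlier (the flag bits are assumed values), the stub is only reached when both pages are interesting:

    #include <cstdint>

    const uintptr_t kPageAlignMask = (1u << 20) - 1;               // assumed
    const uintptr_t kPointersToHereAreInterestingMask = 1u << 0;   // assumed
    const uintptr_t kPointersFromHereAreInterestingMask = 1u << 1; // assumed

    struct Chunk { uintptr_t flags; };

    inline Chunk* ChunkOf(uintptr_t p) {
      return reinterpret_cast<Chunk*>(p & ~kPageAlignMask);
    }

    // Skip the RecordWriteStub unless the value's page tracks incoming
    // pointers and the object's page tracks outgoing pointers.
    bool NeedsRecordWriteStub(uintptr_t object, uintptr_t value) {
      return (ChunkOf(value)->flags & kPointersToHereAreInterestingMask) != 0 &&
             (ChunkOf(object)->flags & kPointersFromHereAreInterestingMask) != 0;
    }
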
@@ -224,7 +311,7 @@
 
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
-    xor_(dst, Operand(dst));  // Shorter than mov.
+    xor_(dst, dst);  // Shorter than mov.
   } else {
     mov(dst, x);
   }
@@ -265,7 +352,7 @@
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
   // see ROOT_ACCESSOR macro in factory.h
-  Handle<Object> value(&isolate()->heap()->roots_address()[index]);
+  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
   cmp(with, value);
 }
 
@@ -287,13 +374,111 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Map::kMaximumBitField2FastElementValue);
   j(above, fail, distance);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastElementValue);
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register key,
+    Register scratch1,
+    XMMRegister scratch2,
+    Label* fail,
+    bool specialize_for_processor) {
+  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmp(FieldOperand(maybe_number, offset),
+      Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    bind(&have_double_value);
+    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+           scratch2);
+  } else {
+    fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    bind(&have_double_value);
+    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN; otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+  } else {
+    fld_d(Operand::StaticVariable(canonical_nan_reference));
+  }
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi. Convert to a double and store.
+  // Preserve original value.
+  mov(scratch1, maybe_number);
+  SmiUntag(scratch1);
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope fscope(SSE2);
+    cvtsi2sd(scratch2, scratch1);
+    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+           scratch2);
+  } else {
+    push(scratch1);
+    fild_s(Operand(esp, 0));
+    pop(scratch1);
+    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  bind(&done);
+}
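
The upper-32-bit comparisons above classify the double before it is stored. A compilable restatement (the bound constant matches the one used above; everything else is standard C++):

    #include <cstdint>
    #include <cstring>

    const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;

    enum DoubleClass { ORDINARY, INFINITE_VALUE, NAN_VALUE };

    DoubleClass Classify(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t upper = static_cast<uint32_t>(bits >> 32);
      uint32_t lower = static_cast<uint32_t>(bits);
      if (static_cast<int32_t>(upper) <
          static_cast<int32_t>(kNaNOrInfinityLowerBoundUpper32)) {
        return ORDINARY;  // fast path: the not_nan branch above
      }
      if (upper > kNaNOrInfinityLowerBoundUpper32) return NAN_VALUE;
      // Exponent all ones, upper fraction bits zero: Infinity only if the
      // lower fraction bits are zero too (the Immediate(0) compare above).
      return lower == 0 ? INFINITE_VALUE : NAN_VALUE;
    }
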
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
@@ -345,7 +530,7 @@
                                             Register scratch,
                                             Label* fail) {
   movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
-  sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   cmp(scratch,
       LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   j(above, fail);
@@ -355,8 +540,7 @@
 void MacroAssembler::FCmp() {
   if (CpuFeatures::IsSupported(CMOV)) {
     fucomip();
-    ffree(0);
-    fincstp();
+    fstp(0);
   } else {
     fucompp();
     push(eax);
@@ -402,7 +586,7 @@
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
-  mov(ebp, Operand(esp));
+  mov(ebp, esp);
   push(esi);
   push(Immediate(Smi::FromInt(type)));
   push(Immediate(CodeObject()));
@@ -429,7 +613,7 @@
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
-  mov(ebp, Operand(esp));
+  mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
   ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
@@ -451,14 +635,14 @@
   if (save_doubles) {
     CpuFeatures::Scope scope(SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
-    sub(Operand(esp), Immediate(space));
+    sub(esp, Immediate(space));
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
-    sub(Operand(esp), Immediate(argc * kPointerSize));
+    sub(esp, Immediate(argc * kPointerSize));
   }
 
   // Get the required frame alignment for the OS.
@@ -478,7 +662,7 @@
 
   // Set up argc and argv in callee-saved registers.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  mov(edi, Operand(eax));
+  mov(edi, eax);
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
@@ -532,7 +716,7 @@
 
 
 void MacroAssembler::LeaveApiExitFrame() {
-  mov(esp, Operand(ebp));
+  mov(esp, ebp);
   pop(ebp);
 
   LeaveExitFrameEpilogue();
@@ -540,47 +724,65 @@
 
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+                                    HandlerType type,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // The pc (return address) is already on TOS.
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // We will build up the handler from the bottom by pushing on the stack.
+  // First compute the state and push the frame pointer and context.
+  unsigned state = StackHandler::OffsetField::encode(handler_index);
   if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      push(Immediate(StackHandler::TRY_CATCH));
-    } else {
-      push(Immediate(StackHandler::TRY_FINALLY));
-    }
     push(ebp);
     push(esi);
+    state |= (type == TRY_CATCH_HANDLER)
+        ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+        : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
   } else {
     ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for ebp. We expect the code throwing an exception to check ebp
-    // before dereferencing it to restore the context.
-    push(Immediate(StackHandler::ENTRY));
+    // The frame pointer does not point to a JS frame so we save NULL for
+    // ebp. We expect the code throwing an exception to check ebp before
+    // dereferencing it to restore the context.
     push(Immediate(0));  // NULL frame pointer.
     push(Immediate(Smi::FromInt(0)));  // No context.
+    state |= StackHandler::KindField::encode(StackHandler::ENTRY);
   }
-  // Save the current handler as the next handler.
-  push(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
-                                                 isolate())));
-  // Link this handler as the new current one.
-  mov(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
-                                                isolate())),
-      esp);
+
+  // Push the state and the code object.
+  push(Immediate(state));
+  push(CodeObject());
+
+  // Link the current handler as the next handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  push(Operand::StaticVariable(handler_address));
+  // Set this new handler as the current one.
+  mov(Operand::StaticVariable(handler_address), esp);
 }
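
The state word built above packs the handler-table index together with the handler kind; a sketch of the encoding (the 2-bit kind width is an assumption, consistent with the shr by kKindWidth in JumpToHandlerEntry below):

    enum Kind { ENTRY = 0, TRY_CATCH = 1, TRY_FINALLY = 2 };
    const unsigned kKindWidth = 2;  // assumed StackHandler::kKindWidth

    unsigned EncodeState(Kind kind, unsigned handler_index) {
      return (handler_index << kKindWidth) | kind;  // OffsetField | KindField
    }

    Kind KindOf(unsigned state) {
      return static_cast<Kind>(state & ((1u << kKindWidth) - 1));
    }

    unsigned IndexOf(unsigned state) { return state >> kKindWidth; }
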
 
 
 void MacroAssembler::PopTryHandler() {
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
-                                                isolate())));
-  add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  pop(Operand::StaticVariable(handler_address));
+  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // eax = exception, edi = code object, edx = state.
+  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
+  shr(edx, StackHandler::kKindWidth);
+  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
+  SmiUntag(edx);
+  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+  jmp(edi);
 }
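
In C++ terms, the lookup resolves the packed index through the code object's handler table of byte offsets (smi-untagging elided; the kind width is assumed, as in the sketch above):

    #include <cstdint>
    #include <vector>

    const unsigned kHandlerKindWidth = 2;  // assumed

    const uint8_t* HandlerEntryAddress(const uint8_t* code_start,
                                       const std::vector<int>& handler_table,
                                       unsigned state) {
      int offset = handler_table[state >> kHandlerKindWidth];  // untagged
      return code_start + offset;  // the jmp target
    }
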
 
 
@@ -588,36 +790,39 @@
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // eax must hold the exception.
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in eax.
   if (!value.is(eax)) {
     mov(eax, value);
   }
-
-  // Drop the sp to the top of the handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress,
-                                    isolate());
+  // Drop the stack pointer to the top of the top handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   mov(esp, Operand::StaticVariable(handler_address));
-
-  // Restore next handler, context, and frame pointer; discard handler state.
+  // Restore the next handler.
   pop(Operand::StaticVariable(handler_address));
+
+  // Remove the code object and state, compute the handler address in edi.
+  pop(edi);  // Code object.
+  pop(edx);  // Index and state.
+
+  // Restore the context and frame pointer.
   pop(esi);  // Context.
   pop(ebp);  // Frame pointer.
-  pop(edx);  // State.
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
-  // of them.
+  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
+  // ebp or esi.
   Label skip;
-  cmp(Operand(edx), Immediate(StackHandler::ENTRY));
-  j(equal, &skip, Label::kNear);
+  test(esi, esi);
+  j(zero, &skip, Label::kNear);
   mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   bind(&skip);
 
-  ret(0);
+  JumpToHandlerEntry();
 }
 
 
@@ -626,61 +831,55 @@
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // eax must hold the exception.
-  if (!value.is(eax)) {
-    mov(eax, value);
-  }
-
-  // Drop sp to the top stack handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress,
-                                    isolate());
-  mov(esp, Operand::StaticVariable(handler_address));
-
-  // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done, Label::kNear);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  mov(esp, Operand(esp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
-
-  // Set the top handler address to next handler past the current ENTRY handler.
-  pop(Operand::StaticVariable(handler_address));
-
+  // The exception is expected in eax.
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
-    ExternalReference external_caught(
-        Isolate::kExternalCaughtExceptionAddress,
-        isolate());
-    mov(eax, false);
-    mov(Operand::StaticVariable(external_caught), eax);
+    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                      isolate());
+    mov(Operand::StaticVariable(external_caught), Immediate(false));
 
     // Set pending exception and eax to out of memory exception.
     ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                         isolate());
     mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
     mov(Operand::StaticVariable(pending_exception), eax);
+  } else if (!value.is(eax)) {
+    mov(eax, value);
   }
 
-  // Discard the context saved in the handler and clear the context pointer.
-  pop(edx);
-  Set(esi, Immediate(0));
+  // Drop the stack pointer to the top of the top stack handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  mov(esp, Operand::StaticVariable(handler_address));
 
-  // Restore fp from handler and discard handler state.
+  // Unwind the handlers until the top ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind, Label::kNear);
+  bind(&fetch_next);
+  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
+
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::ENTRY == 0);
+  test(Operand(esp, StackHandlerConstants::kStateOffset),
+       Immediate(StackHandler::KindField::kMask));
+  j(not_zero, &fetch_next);
+
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(Operand::StaticVariable(handler_address));
+
+  // Remove the code object and state, compute the handler address in edi.
+  pop(edi);  // Code object.
+  pop(edx);  // Index and state.
+
+  // Clear the context pointer and frame pointer (0 was saved in the handler).
+  pop(esi);
   pop(ebp);
-  pop(edx);  // State.
 
-  ret(0);
+  JumpToHandlerEntry();
 }
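
The fetch_next/check_kind loop above, restated in C++ (the field order follows the STATIC_ASSERTs; the kind mask is an assumption):

    struct StackHandler {  // layout mirrors the kNextOffset..kFPOffset asserts
      StackHandler* next;
      void* code;
      unsigned state;      // handler-table index and kind, kind in low bits
      void* context;
      void* fp;
    };

    const unsigned kKindMask = 0x3;  // assumed StackHandler::KindField::kMask

    StackHandler* UnwindToEntry(StackHandler* top) {
      // ENTRY is encoded as 0, so any non-zero kind means keep unwinding.
      while ((top->state & kKindMask) != 0) top = top->next;
      return top;
    }
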
 
 
@@ -696,7 +895,7 @@
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
-    cmp(Operand(scratch), Immediate(0));
+    cmp(scratch, Immediate(0));
     Check(not_equal, "we should not have an empty lexical context");
   }
   // Load the global context of the current context.
@@ -752,51 +951,6 @@
 }
 
 
-// Compute the hash code from the untagged key.  This must be kept in sync
-// with ComputeIntegerHash in utils.h.
-//
-// Note: r0 will contain hash code
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
-  // Xor original key with a seed.
-  if (Serializer::enabled()) {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
-    mov(scratch, Operand::StaticArray(scratch,
-                                      times_pointer_size,
-                                      roots_address));
-    SmiUntag(scratch);
-    xor_(r0, Operand(scratch));
-  } else {
-    int32_t seed = isolate()->heap()->HashSeed();
-    xor_(r0, seed);
-  }
-
-  // hash = ~hash + (hash << 15);
-  mov(scratch, r0);
-  not_(r0);
-  shl(scratch, 15);
-  add(r0, Operand(scratch));
-  // hash = hash ^ (hash >> 12);
-  mov(scratch, r0);
-  shr(scratch, 12);
-  xor_(r0, Operand(scratch));
-  // hash = hash + (hash << 2);
-  lea(r0, Operand(r0, r0, times_4, 0));
-  // hash = hash ^ (hash >> 4);
-  mov(scratch, r0);
-  shr(scratch, 4);
-  xor_(r0, Operand(scratch));
-  // hash = hash * 2057;
-  imul(r0, r0, 2057);
-  // hash = hash ^ (hash >> 16);
-  mov(scratch, r0);
-  shr(scratch, 16);
-  xor_(r0, Operand(scratch));
-}
-
-
-
 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                               Register elements,
                                               Register key,
@@ -822,10 +976,33 @@
 
   Label done;
 
-  GetNumberHash(r0, r1);
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  mov(r1, r0);
+  not_(r0);
+  shl(r1, 15);
+  add(r0, r1);
+  // hash = hash ^ (hash >> 12);
+  mov(r1, r0);
+  shr(r1, 12);
+  xor_(r0, r1);
+  // hash = hash + (hash << 2);
+  lea(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  mov(r1, r0);
+  shr(r1, 4);
+  xor_(r0, r1);
+  // hash = hash * 2057;
+  imul(r0, r0, 2057);
+  // hash = hash ^ (hash >> 16);
+  mov(r1, r0);
+  shr(r1, 16);
+  xor_(r0, r1);
 
   // Compute capacity mask.
-  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
   shr(r1, kSmiTagSize);  // convert smi to int
   dec(r1);
 
@@ -836,19 +1013,19 @@
     mov(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      add(Operand(r2), Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+      add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
     }
-    and_(r2, Operand(r1));
+    and_(r2, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    ASSERT(NumberDictionary::kEntrySize == 3);
     lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
 
     // Check if the key matches.
     cmp(key, FieldOperand(elements,
                           r2,
                           times_pointer_size,
-                          SeededNumberDictionary::kElementsStartOffset));
+                          NumberDictionary::kElementsStartOffset));
     if (i != (kProbes - 1)) {
       j(equal, &done);
     } else {
@@ -859,7 +1036,7 @@
   bind(&done);
   // Check that the value is a normal property.
   const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   ASSERT_EQ(NORMAL, 0);
   test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
@@ -867,7 +1044,7 @@
 
   // Get the value at the masked, scaled index.
   const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+      NumberDictionary::kElementsStartOffset + kPointerSize;
   mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
 }
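
The inlined sequence above must stay in sync with ComputeIntegerHash in utils.h; here it is in C++, together with the masked probe the loop computes:

    #include <cstdint>

    uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

    // The probe for attempt i, per the "(hash + i + i * i) & mask" comment;
    // capacity is a power of two, so the mask is capacity - 1.
    uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
      return (hash + i + i * i) & (capacity - 1);
    }
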
 
@@ -894,7 +1071,7 @@
   if (scratch.is(no_reg)) {
     mov(result, Operand::StaticVariable(new_space_allocation_top));
   } else {
-    mov(Operand(scratch), Immediate(new_space_allocation_top));
+    mov(scratch, Immediate(new_space_allocation_top));
     mov(result, Operand(scratch, 0));
   }
 }
@@ -953,7 +1130,7 @@
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
-  add(Operand(top_reg), Immediate(object_size));
+  add(top_reg, Immediate(object_size));
   j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -964,12 +1141,12 @@
   // Tag result if requested.
   if (top_reg.is(result)) {
     if ((flags & TAG_OBJECT) != 0) {
-      sub(Operand(result), Immediate(object_size - kHeapObjectTag));
+      sub(result, Immediate(object_size - kHeapObjectTag));
     } else {
-      sub(Operand(result), Immediate(object_size));
+      sub(result, Immediate(object_size));
     }
   } else if ((flags & TAG_OBJECT) != 0) {
-    add(Operand(result), Immediate(kHeapObjectTag));
+    add(result, Immediate(kHeapObjectTag));
   }
 }
 
@@ -1007,7 +1184,7 @@
   // We assume that element_count*element_size + header_size does not
   // overflow.
   lea(result_end, Operand(element_count, element_size, header_size));
-  add(result_end, Operand(result));
+  add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1052,7 +1229,7 @@
   if (!object_size.is(result_end)) {
     mov(result_end, object_size);
   }
-  add(result_end, Operand(result));
+  add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1072,7 +1249,7 @@
       ExternalReference::new_space_allocation_top_address(isolate());
 
   // Make sure the object has no tag before resetting top.
-  and_(Operand(object), Immediate(~kHeapObjectTagMask));
+  and_(object, Immediate(~kHeapObjectTagMask));
 #ifdef DEBUG
   cmp(object, Operand::StaticVariable(new_space_allocation_top));
   Check(below, "Undo allocation of non allocated memory");
@@ -1111,7 +1288,7 @@
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
-  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -1145,8 +1322,8 @@
   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, length);
   ASSERT(kCharSize == 1);
-  add(Operand(scratch1), Immediate(kObjectAlignmentMask));
-  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+  add(scratch1, Immediate(kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate ascii string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
@@ -1280,7 +1457,7 @@
                                Register scratch) {
   Label loop, done, short_string, short_loop;
   // Experimentation shows that the short string loop is faster if length < 10.
-  cmp(Operand(length), Immediate(10));
+  cmp(length, Immediate(10));
   j(less_equal, &short_string);
 
   ASSERT(source.is(esi));
@@ -1295,12 +1472,12 @@
   mov(scratch, ecx);
   shr(ecx, 2);
   rep_movs();
-  and_(Operand(scratch), Immediate(0x3));
-  add(destination, Operand(scratch));
+  and_(scratch, Immediate(0x3));
+  add(destination, scratch);
   jmp(&done);
 
   bind(&short_string);
-  test(length, Operand(length));
+  test(length, length);
   j(zero, &done);
 
   bind(&short_loop);
@@ -1315,13 +1492,40 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  mov(Operand(start_offset, 0), filler);
+  add(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmp(start_offset, end_offset);
+  j(less, &loop);
+}
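
InitializeFieldsWithFiller is the assembly form of this loop, writing the filler word over [start, end) in pointer-sized steps:

    #include <cstdint>

    void InitializeFieldsWithFiller(uintptr_t* start, uintptr_t* end,
                                    uintptr_t filler) {
      for (uintptr_t* p = start; p < end; ++p) *p = filler;  // word stores
    }
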
+
+
+void MacroAssembler::BooleanBitTest(Register object,
+                                    int field_offset,
+                                    int bit_index) {
+  bit_index += kSmiTagSize + kSmiShiftSize;
+  ASSERT(IsPowerOf2(kBitsPerByte));
+  int byte_index = bit_index / kBitsPerByte;
+  int byte_bit_index = bit_index & (kBitsPerByte - 1);
+  test_b(FieldOperand(object, field_offset + byte_index),
+         static_cast<byte>(1 << byte_bit_index));
+}
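
BooleanBitTest reads one bit out of a smi-tagged bitfield word with a single test_b; the index arithmetic it performs, as plain C++:

    const int kBitsPerByte = 8;
    const int kSmiTagSize = 1;   // ia32 smis carry a one-bit tag
    const int kSmiShiftSize = 0;

    void SplitBitIndex(int bit_index, int* byte_index, int* byte_bit_index) {
      bit_index += kSmiTagSize + kSmiShiftSize;  // skip over the smi tag bits
      *byte_index = bit_index / kBitsPerByte;
      *byte_bit_index = bit_index & (kBitsPerByte - 1);
    }
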
+
+
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
   Label ok;
-  test(result, Operand(result));
+  test(result, result);
   j(not_zero, &ok);
-  test(op, Operand(op));
+  test(op, op);
   j(sign, then_label);
   bind(&ok);
 }
@@ -1333,10 +1537,10 @@
                                       Register scratch,
                                       Label* then_label) {
   Label ok;
-  test(result, Operand(result));
+  test(result, result);
   j(not_zero, &ok);
-  mov(scratch, Operand(op1));
-  or_(scratch, Operand(op2));
+  mov(scratch, op1);
+  or_(scratch, op2);
   j(sign, then_label);
   bind(&ok);
 }
@@ -1345,7 +1549,8 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -1353,6 +1558,15 @@
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss);
 
+  if (miss_on_bound_function) {
+    // If a bound function, go to miss label.
+    mov(scratch,
+        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
+                   SharedFunctionInfo::kBoundFunction);
+    j(not_zero, miss);
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
@@ -1366,7 +1580,7 @@
   // If the prototype or initial map is the hole, don't return it and
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
-  cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
+  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
@@ -1389,48 +1603,32 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
-  return result;
-}
-
-
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
-    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+    add(esp, Immediate(num_arguments * kPointerSize));
   }
   mov(eax, Immediate(isolate()->factory()->undefined_value()));
 }
@@ -1464,18 +1662,11 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
-                                            int num_arguments) {
-  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -1497,26 +1688,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
-                                            int num_arguments) {
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    // Since we did not call the stub, there was no allocation failure.
-    // Return some non-failure object.
-    return isolate()->heap()->undefined_value();
-  }
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(eax, Immediate(num_arguments));
-  mov(ebx, Immediate(ExternalReference(f, isolate())));
-  CEntryStub ces(1);
-  return TryCallStub(&ces);
-}
-
-
 void MacroAssembler::CallExternalReference(ExternalReference ref,
                                            int num_arguments) {
   mov(eax, Immediate(num_arguments));
@@ -1539,17 +1710,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(eax, Immediate(num_arguments));
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -1559,14 +1719,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
-                                                int num_arguments,
-                                                int result_size) {
-  return TryTailCallExternalReference(
-      ExternalReference(fid, isolate()), num_arguments, result_size);
-}
-
-
 // If true, a Handle<T> returned by value from a function with cdecl calling
 // convention will be returned directly as the value of the location_ field,
 // in register eax.
@@ -1615,8 +1767,8 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
-                                                         int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   ExternalReference limit_address =
@@ -1629,8 +1781,8 @@
   mov(edi, Operand::StaticVariable(limit_address));
   add(Operand::StaticVariable(level_address), Immediate(1));
 
-  // Call the api function!
-  call(function->address(), RelocInfo::RUNTIME_ENTRY);
+  // Call the api function.
+  call(function_address, RelocInfo::RUNTIME_ENTRY);
 
   if (!kReturnHandlesDirectly) {
     // PrepareCallApiFunction saved pointer to the output slot into
@@ -1645,7 +1797,7 @@
   Label leave_exit_frame;
 
   // Check if the result handle holds 0.
-  test(eax, Operand(eax));
+  test(eax, eax);
   j(zero, &empty_handle);
   // It was non-zero.  Dereference to get the result value.
   mov(eax, Operand(eax, 0));
@@ -1668,11 +1820,8 @@
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
   bind(&promote_scheduled_exception);
-  MaybeObject* result =
-      TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+
   bind(&empty_handle);
   // It was zero; the result is undefined.
   mov(eax, isolate()->factory()->undefined_value());
@@ -1686,11 +1835,9 @@
   mov(edi, eax);
   mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   mov(eax, Immediate(delete_extensions));
-  call(Operand(eax));
+  call(eax);
   mov(eax, edi);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -1702,15 +1849,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& ext) {
-  // Set the entry point and jump to the C entry runtime stub.
-  mov(ebx, Immediate(ext));
-  CEntryStub ces(1);
-  return TryTailCallStub(&ces);
-}
-
-
 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   // This macro takes the dst register to make the code more readable
   // at the call sites. However, the dst register has to be ecx to
@@ -1720,10 +1858,10 @@
   if (call_kind == CALL_AS_FUNCTION) {
     // Set to some non-zero smi by updating the least significant
     // byte.
-    mov_b(Operand(dst), 1 << kSmiTagSize);
+    mov_b(dst, 1 << kSmiTagSize);
   } else {
     // Set to smi zero by clearing the register.
-    xor_(dst, Operand(dst));
+    xor_(dst, dst);
   }
 }
 
@@ -1768,7 +1906,7 @@
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
-      cmp(expected.reg(), Operand(actual.reg()));
+      cmp(expected.reg(), actual.reg());
       j(equal, &invoke);
       ASSERT(actual.reg().is(eax));
       ASSERT(expected.reg().is(ebx));
@@ -1780,7 +1918,7 @@
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (!code_constant.is_null()) {
       mov(edx, Immediate(code_constant));
-      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
     } else if (!code_operand.is_reg(edx)) {
       mov(edx, code_operand);
     }
@@ -1806,6 +1944,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
                  &done, flag, Label::kNear, call_wrapper,
@@ -1831,8 +1972,11 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
-  Operand dummy(eax);
+  Operand dummy(eax, 0);
   InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
                  call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
@@ -1854,6 +1998,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1866,36 +2013,32 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Get the function and setup the context.
-  mov(edi, Immediate(Handle<JSFunction>(function)));
+  mov(edi, Immediate(function));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-               expected, actual, flag, call_wrapper, call_kind);
-  } else {
-    Handle<Code> code(function->code());
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
-               flag, call_wrapper, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+             expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -1906,6 +2049,7 @@
              expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
+
 void MacroAssembler::GetBuiltinFunction(Register target,
                                         Builtins::JavaScript id) {
   // Load the JavaScript builtin function from the builtins object.
@@ -1915,6 +2059,7 @@
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }
 
+
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   ASSERT(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
@@ -2016,7 +2161,7 @@
     ret(bytes_dropped);
   } else {
     pop(scratch);
-    add(Operand(esp), Immediate(bytes_dropped));
+    add(esp, Immediate(bytes_dropped));
     push(scratch);
     ret(0);
   }
@@ -2025,7 +2170,7 @@
 
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
-    add(Operand(esp), Immediate(stack_elements * kPointerSize));
+    add(esp, Immediate(stack_elements * kPointerSize));
   }
 }
 
@@ -2168,13 +2313,19 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   push(eax);
   push(Immediate(p0));
   push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
-  CallRuntime(Runtime::kAbort, 2);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // Will not return here.
   int3();
 }
@@ -2197,7 +2348,7 @@
   ASSERT(is_uintn(power + HeapNumber::kExponentBias,
                   HeapNumber::kExponentBits));
   mov(scratch, Immediate(power + HeapNumber::kExponentBias));
-  movd(dst, Operand(scratch));
+  movd(dst, scratch);
   psllq(dst, HeapNumber::kMantissaBits);
 }
 
@@ -2223,8 +2374,8 @@
                                                          Label* failure) {
   // Check that both objects are not smis.
   STATIC_ASSERT(kSmiTag == 0);
-  mov(scratch1, Operand(object1));
-  and_(scratch1, Operand(object2));
+  mov(scratch1, object1);
+  and_(scratch1, object2);
   JumpIfSmi(scratch1, failure);
 
   // Load instance type for both strings.
@@ -2253,12 +2404,12 @@
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
-    sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
     ASSERT(IsPowerOf2(frame_alignment));
     and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
-    sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+    sub(esp, Immediate(num_arguments * kPointerSize));
   }
 }
 
@@ -2266,27 +2417,39 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   // Trashing eax is ok as it will be the return value.
-  mov(Operand(eax), Immediate(function));
+  mov(eax, Immediate(function));
   CallCFunction(eax, num_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
-  call(Operand(function));
+  call(function);
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
-    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+    add(esp, Immediate(num_arguments * kPointerSize));
   }
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -2308,6 +2471,198 @@
 }
 
 
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+           static_cast<uint8_t>(mask));
+  } else {
+    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black,
+                                 Label::Distance on_black_near) {
+  HasColor(object, scratch0, scratch1,
+           on_black, on_black_near,
+           1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              Label::Distance has_color_distance,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
+  j(zero, &word_boundary, Label::kNear);
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  jmp(&other_color, Label::kNear);
+
+  bind(&word_boundary);
+  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  bind(&other_color);
+}
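
A sketch of the two-bit color encoding the assertions reference (this simplification ignores the cell-boundary case the word_boundary label handles):

    #include <cstdint>

    enum Color { WHITE = 0, IMPOSSIBLE = 1, BLACK = 2, GREY = 3 };

    // cell is one word of the marking bitmap; mask selects the first of the
    // object's two adjacent mark bits.
    Color ColorOf(uint32_t cell, uint32_t mask) {
      int first = (cell & mask) != 0;
      int second = (cell & (mask << 1)) != 0;
      return static_cast<Color>((first << 1) | second);  // "10" == black
    }
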
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  and_(bitmap_reg, addr_reg);
+  mov(ecx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shr(ecx, shift);
+  and_(ecx,
+       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
+  add(bitmap_reg, ecx);
+  mov(ecx, addr_reg);
+  shr(ecx, kPointerSizeLog2);
+  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+  mov(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
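
GetMarkBits in C++ terms (the cell constants mirror the Bitmap class on ia32; the page size is an assumption): split an address into the byte offset of its bitmap cell within the page and a one-bit mask within that cell.

    #include <cstdint>

    const int kPointerSizeLog2 = 2;   // ia32
    const int kBitsPerCellLog2 = 5;   // 32 mark bits per cell
    const int kBytesPerCellLog2 = 2;  // 4 bytes per cell
    const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed page size

    void GetMarkBits(uintptr_t addr, uintptr_t* cell_address, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      // One mark bit per pointer-aligned word: drop the pointer bits, then
      // split the bit index into a cell offset (in bytes) and a bit-in-cell.
      const int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
      uintptr_t cell_offset = (addr >> shift) &
          ((kPageAlignmentMask >> shift) & ~((1u << kBytesPerCellLog2) - 1));
      *cell_address = page + cell_offset;  // users add MemoryChunk::kHeaderSize
      *mask = 1u << ((addr >> kPointerSizeLog2) &
                     ((1u << kBitsPerCellLog2) - 1));
    }
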
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position, and white does
+  // not, we only need to check one bit.
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // Shift left by one (the following add).  This may overflow, making the
+    // check conservative.
+    add(mask_scratch, mask_scratch);
+    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = ecx;  // Holds map while checking type.
+  Register length = ecx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for a heap number.
+  mov(map, FieldOperand(value, HeapObject::kMapOffset));
+  cmp(map, FACTORY->heap_number_map());
+  j(not_equal, &not_heap_number, Label::kNear);
+  mov(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not an indirect (cons or sliced) string, then
+  // it's an object containing no GC pointers.
+  Register instance_type = ecx;
+  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  test_b(instance_type, kExternalStringTag);
+  j(zero, &not_external, Label::kNear);
+  mov(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  add(length, Immediate(0x04));
+  // Value is now either 4 (if ASCII) or 8 (if UC16), i.e., the char size
+  // shifted left by 2.  If we multiply the smi-tagged string length by this,
+  // it still won't overflow a 32-bit value.
+  ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+  ASSERT(SeqAsciiString::kMaxSize <=
+         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+      length);
+  if (FLAG_debug_code) {
+    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+    Check(less_equal, "Live Bytes Count overflow chunk size");
+  }
+
+  bind(&done);
+}
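
On the size computation: kStringEncodingMask equals kAsciiStringTag (0x04), so the and/xor/add sequence maps the instance type to 4 for ASCII and 8 for two-byte strings, the character size pre-shifted by 2; multiplying by the smi-tagged length (value << 1) and shifting right by 3 then yields plain bytes. A C++ sketch, with the header size and alignment as assumptions for a 32-bit build:

  #include <cstdint>

  // Byte size of a white sequential string as computed above (assumed:
  // one-bit smi tag, SeqString::kHeaderSize == 12, 4-byte object alignment).
  uint32_t SeqStringSizeInBytes(uint32_t length, bool is_ascii) {
    const uint32_t kHeaderSize = 12;
    const uint32_t kAlignmentMask = 3;
    uint32_t smi_length = length << 1;              // Smi-tagged length field.
    uint32_t char_size_shifted = is_ascii ? 4 : 8;  // Char size << 2.
    uint32_t bytes = (char_size_shifted * smi_length) >> 3;
    return (bytes + kHeaderSize + kAlignmentMask) & ~kAlignmentMask;
  }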
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 8c5f5e9..03ec28a 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -50,6 +51,13 @@
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -61,42 +69,130 @@
 
   // ---------------------------------------------------------------------------
   // GC Support
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
-  // For page containing |object| mark region covering |addr| dirty.
-  // RecordWriteHelper only works if the object is not in new
-  // space.
-  void RecordWriteHelper(Register object,
-                         Register addr,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
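
Conceptually the helper appends the slot address to the store buffer and deals with overflow. A rough C++ sketch; the overflow test via a bit in the top pointer reflects how this version sizes the buffer, but treat the details as assumptions:

  #include <cstdint>
  typedef uint8_t* Address;

  // store_buffer_top lives behind an external reference in the real code.
  void RememberedSetHelperSketch(Address*& store_buffer_top, Address slot_addr,
                                 uintptr_t overflow_bit) {
    *store_buffer_top++ = slot_addr;
    if (reinterpret_cast<uintptr_t>(store_buffer_top) & overflow_bit) {
      // Buffer is full: the emitted code calls the StoreBufferOverflow stub,
      // which compacts the buffer into the remembered set.
    }
  }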
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,  // equal for new space, not_equal otherwise.
-                  Label* branch,
-                  Label::Distance branch_near = Label::kFar);
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met,
+                     Label::Distance condition_met_distance = Label::kFar);
 
-  // For page containing |object| mark region covering [object+offset]
-  // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. If offset is zero, then the scratch register
-  // contains the array index into the elements array represented as a
-  // Smi. All registers are clobbered by the operation. RecordWrite
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch,
+                           Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, zero, branch, distance);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch,
+                        Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, not_zero, branch, distance);
+  }
+
+  // Check if an object has a given incremental marking color.  Also uses ecx!
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                Label::Distance has_color_distance,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black,
+                   Label::Distance on_black_distance = Label::kFar);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Label* object_is_white_and_not_data,
+                      Label::Distance distance);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  The value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
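
A typical call site stores a value into an in-object field and then notifies the barrier. A sketch (the register assignment is illustrative; OMIT_SMI_CHECK is only valid when the value is known not to be a smi):

  mov(FieldOperand(ebx, JSObject::kPropertiesOffset), eax);
  RecordWriteField(ebx, JSObject::kPropertiesOffset, eax, ecx,
                   kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);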
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // Operand(reg, off).
+  void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // Notify the garbage collector that we wrote a pointer into a fixed array.
+  // |array| is the array being stored into, |value| is the
+  // object being stored.  |index| is the array index represented as a
+  // Smi.  All registers are clobbered by the operation.  RecordWriteArray
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
-  void RecordWrite(Register object,
-                   int offset,
-                   Register value,
-                   Register scratch);
+  void RecordWriteArray(
+      Register array,
+      Register value,
+      Register index,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
   // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. All registers are clobbered by the
+  // object being stored. The address and value registers are clobbered by the
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register value);
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -105,15 +201,6 @@
   void DebugBreak();
 #endif
 
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter specific kind of exit frame. Expects the number of
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
@@ -159,6 +246,15 @@
   void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag,
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind) {
+    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+  }
+
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
@@ -182,7 +278,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -225,6 +321,29 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Label* fail,
+                               Label::Distance distance = Label::kFar);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements, otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register maybe_number,
+                                   Register elements,
+                                   Register key,
+                                   Register scratch1,
+                                   XMMRegister scratch2,
+                                   Label* fail,
+                                   bool specialize_for_processor);
+
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
   // heap object)
@@ -277,7 +396,7 @@
   void SmiTag(Register reg) {
     STATIC_ASSERT(kSmiTag == 0);
     STATIC_ASSERT(kSmiTagSize == 1);
-    add(reg, Operand(reg));
+    add(reg, reg);
   }
   void SmiUntag(Register reg) {
     sar(reg, kSmiTagSize);
@@ -332,9 +451,10 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link into try handler chain.  The return
-  // address must be pushed before calling this helper.
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Push a new try handler and link it into try handler chain.
+  void PushTryHandler(CodeLocation try_location,
+                      HandlerType type,
+                      int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   void PopTryHandler();
@@ -354,7 +474,6 @@
                               Register scratch,
                               Label* miss);
 
-  void GetNumberHash(Register r0, Register scratch);
 
   void LoadFromNumberDictionary(Label* miss,
                                 Register elements,
@@ -466,9 +585,19 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
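
The loop it emits is equivalent to the following C++ (a sketch; registers stand in for raw pointers):

  #include <cstdint>

  // Fill [start_offset, end_offset) one pointer-sized word at a time;
  // start_offset ends up equal to end_offset, as documented above.
  void InitializeFieldsWithFiller(uintptr_t& start_offset,
                                  uintptr_t end_offset,
                                  uintptr_t filler) {
    while (start_offset < end_offset) {
      *reinterpret_cast<uintptr_t*>(start_offset) = filler;
      start_offset += sizeof(uintptr_t);
    }
  }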
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
+  // Check a boolean bit of a Smi field.
+  void BooleanBitTest(Register object, int field_offset, int bit_index);
+
   // Check if result is zero and op is negative.
   void NegativeZeroTest(Register result, Register op, Label* then_label);
 
@@ -485,7 +614,8 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -503,19 +633,9 @@
   // Call a code stub.  Generate the code if necessary.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
   // Tail call a code stub (jump).  Generate the code if necessary.
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -523,19 +643,9 @@
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
-  // Call a runtime function, returning the CodeStub object called.
-  // Try to generate the stub code if necessary.  Do not perform a GC
-  // but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
-                                              int num_arguments);
-
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
-  // Convenience function: Same as above, but takes the fid instead.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
-                                              int num_arguments);
-
   // Convenience function: call an external reference.
   void CallExternalReference(ExternalReference ref, int num_arguments);
 
@@ -546,23 +656,11 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  // Convenience function: tail call a runtime routine (jump). Try to generate
-  // the code if necessary. Do not perform a GC but instead return a retry after
-  // GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
-                                                  int num_arguments,
-                                                  int result_size);
-
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
   // etc., not pushed. The argument count assumes all arguments are word sized.
@@ -587,19 +685,15 @@
   // stores the pointer to the reserved slot into esi.
   void PrepareCallApiFunction(int argc);
 
-  // Calls an API function. Allocates HandleScope, extracts
-  // returned value from handle and propagates exceptions.
-  // Clobbers ebx, edi and caller-save registers. Restores context.
-  // On return removes stack_space * kPointerSize (GCed).
-  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Clobbers ebx, edi and
+  // caller-save registers.  Restores context.  On return removes
+  // stack_space * kPointerSize (GCed).
+  void CallApiFunctionAndReturn(Address function_address, int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
-
   // ---------------------------------------------------------------------------
   // Utilities
 
@@ -668,6 +762,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // String utilities.
@@ -691,9 +788,14 @@
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -704,14 +806,10 @@
                       const Operand& code_operand,
                       Label* done,
                       InvokeFlag flag,
-                      Label::Distance done_near = Label::kFar,
+                      Label::Distance done_distance,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void EnterExitFramePrologue();
   void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
@@ -730,6 +828,24 @@
                                                     Register scratch,
                                                     bool gc_allowed);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* condition_met,
+                  Label::Distance condition_met_distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds a one-bit mask at the position of the first mark bit.
+  // Uses ecx as scratch and leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
@@ -765,26 +881,26 @@
 // Static helper functions.
 
 // Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
   return Operand(object, offset - kHeapObjectTag);
 }
 
 
 // Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
-                                   Register index,
-                                   ScaleFactor scale,
-                                   int offset) {
+inline Operand FieldOperand(Register object,
+                            Register index,
+                            ScaleFactor scale,
+                            int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
 
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
 
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
   return ContextOperand(esi, Context::GLOBAL_INDEX);
 }
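
For example, since kHeapObjectTag is 1 and the map is the first field, loading an object's map

  mov(edx, FieldOperand(eax, HeapObject::kMapOffset));

assembles to a load from [eax - 1]: tagged pointers point one byte past the true object start, and these helpers fold the untagging into the displacement.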
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index d175d9e..dbf01ab 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,7 +134,7 @@
 
 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    __ add(Operand(edi), Immediate(by * char_size()));
+    __ add(edi, Immediate(by * char_size()));
   }
 }
 
@@ -152,8 +152,8 @@
   CheckPreemption();
   // Pop Code* offset from backtrack stack, add Code* and jump to location.
   Pop(ebx);
-  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(ebx));
+  __ add(ebx, Immediate(masm_->CodeObject()));
+  __ jmp(ebx);
 }
 
 
@@ -219,7 +219,7 @@
   int byte_offset = cp_offset * char_size();
   if (check_end_of_string) {
     // Check that there are at least str.length() characters left in the input.
-    __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+    __ cmp(edi, Immediate(-(byte_offset + byte_length)));
     BranchOrBacktrack(greater, on_failure);
   }
 
@@ -288,7 +288,7 @@
   Label fallthrough;
   __ cmp(edi, Operand(backtrack_stackpointer(), 0));
   __ j(not_equal, &fallthrough);
-  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));  // Pop.
+  __ add(backtrack_stackpointer(), Immediate(kPointerSize));  // Pop.
   BranchOrBacktrack(no_condition, on_equal);
   __ bind(&fallthrough);
 }
@@ -300,7 +300,7 @@
   Label fallthrough;
   __ mov(edx, register_location(start_reg));  // Index of start of capture
   __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
-  __ sub(ebx, Operand(edx));  // Length of capture.
+  __ sub(ebx, edx);  // Length of capture.
 
   // The length of a capture should not be negative. This can only happen
   // if the end of the capture is unrecorded, or at a point earlier than
@@ -320,9 +320,9 @@
     __ push(backtrack_stackpointer());
     // After this, the eax, ecx, and edi registers are available.
 
-    __ add(edx, Operand(esi));  // Start of capture
-    __ add(edi, Operand(esi));  // Start of text to match against capture.
-    __ add(ebx, Operand(edi));  // End of text to match against capture.
+    __ add(edx, esi);  // Start of capture
+    __ add(edi, esi);  // Start of text to match against capture.
+    __ add(ebx, edi);  // End of text to match against capture.
 
     Label loop;
     __ bind(&loop);
@@ -339,15 +339,15 @@
     __ movzx_b(ecx, Operand(edx, 0));
     __ or_(ecx, 0x20);
 
-    __ cmp(eax, Operand(ecx));
+    __ cmp(eax, ecx);
     __ j(not_equal, &fail);
 
     __ bind(&loop_increment);
     // Increment pointers into match and capture strings.
-    __ add(Operand(edx), Immediate(1));
-    __ add(Operand(edi), Immediate(1));
+    __ add(edx, Immediate(1));
+    __ add(edi, Immediate(1));
     // Compare to end of match, and loop if not done.
-    __ cmp(edi, Operand(ebx));
+    __ cmp(edi, ebx);
     __ j(below, &loop);
     __ jmp(&success);
 
@@ -361,9 +361,9 @@
     // Restore original value before continuing.
     __ pop(backtrack_stackpointer());
     // Drop original value of character position.
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
     // Compute new value of character position after the matched part.
-    __ sub(edi, Operand(esi));
+    __ sub(edi, esi);
   } else {
     ASSERT(mode_ == UC16);
     // Save registers before calling C function.
@@ -389,16 +389,19 @@
     // Set byte_offset2.
     // Found by adding negative string-end offset of current position (edi)
     // to end of string.
-    __ add(edi, Operand(esi));
+    __ add(edi, esi);
     __ mov(Operand(esp, 1 * kPointerSize), edi);
     // Set byte_offset1.
     // Start of capture, where edx already holds string-end negative offset.
-    __ add(edx, Operand(esi));
+    __ add(edx, esi);
     __ mov(Operand(esp, 0 * kPointerSize), edx);
 
-    ExternalReference compare =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(compare, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference compare =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(compare, argument_count);
+    }
     // Pop original values before reacting on result value.
     __ pop(ebx);
     __ pop(backtrack_stackpointer());
@@ -406,10 +409,10 @@
     __ pop(esi);
 
     // Check if function returned non-zero for success or zero for failure.
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     BranchOrBacktrack(zero, on_no_match);
     // On success, increment position by length of capture.
-    __ add(edi, Operand(ebx));
+    __ add(edi, ebx);
   }
   __ bind(&fallthrough);
 }
@@ -425,7 +428,7 @@
   // Find length of back-referenced capture.
   __ mov(edx, register_location(start_reg));
   __ mov(eax, register_location(start_reg + 1));
-  __ sub(eax, Operand(edx));  // Length to check.
+  __ sub(eax, edx);  // Length to check.
   // Fail on partial or illegal capture (start of capture after end of capture).
   BranchOrBacktrack(less, on_no_match);
   // Succeed on empty capture (including no capture)
@@ -433,7 +436,7 @@
 
   // Check that there are sufficient characters left in the input.
   __ mov(ebx, edi);
-  __ add(ebx, Operand(eax));
+  __ add(ebx, eax);
   BranchOrBacktrack(greater, on_no_match);
 
   // Save register to make it available below.
@@ -441,7 +444,7 @@
 
   // Compute pointers to match string and capture string
   __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
-  __ add(edx, Operand(esi));  // Start of capture.
+  __ add(edx, esi);  // Start of capture.
   __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
 
   Label loop;
@@ -456,10 +459,10 @@
   }
   __ j(not_equal, &fail);
   // Increment pointers into capture and match string.
-  __ add(Operand(edx), Immediate(char_size()));
-  __ add(Operand(ebx), Immediate(char_size()));
+  __ add(edx, Immediate(char_size()));
+  __ add(ebx, Immediate(char_size()));
   // Check if we have reached end of match area.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(below, &loop);
   __ jmp(&success);
 
@@ -471,7 +474,7 @@
   __ bind(&success);
   // Move current character position to position after match.
   __ mov(edi, ecx);
-  __ sub(Operand(edi), esi);
+  __ sub(edi, esi);
   // Restore backtrack stackpointer.
   __ pop(backtrack_stackpointer());
 
@@ -574,17 +577,17 @@
     return true;
   case '.': {
     // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
-    __ mov(Operand(eax), current_character());
-    __ xor_(Operand(eax), Immediate(0x01));
+    __ mov(eax, current_character());
+    __ xor_(eax, Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(Operand(eax), Immediate(0x0b));
+    __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     BranchOrBacktrack(below_equal, on_no_match);
     if (mode_ == UC16) {
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+      __ sub(eax, Immediate(0x2028 - 0x0b));
       __ cmp(eax, 0x2029 - 0x2028);
       BranchOrBacktrack(below_equal, on_no_match);
     }
@@ -593,7 +596,7 @@
   case 'w': {
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(Operand(current_character()), Immediate('z'));
+      __ cmp(current_character(), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -607,7 +610,7 @@
     Label done;
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(Operand(current_character()), Immediate('z'));
+      __ cmp(current_character(), Immediate('z'));
       __ j(above, &done);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -627,10 +630,10 @@
   case 'n': {
     // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
     // The opposite of '.'.
-    __ mov(Operand(eax), current_character());
-    __ xor_(Operand(eax), Immediate(0x01));
+    __ mov(eax, current_character());
+    __ xor_(eax, Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(Operand(eax), Immediate(0x0b));
+    __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     if (mode_ == ASCII) {
       BranchOrBacktrack(above, on_no_match);
@@ -641,7 +644,7 @@
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+      __ sub(eax, Immediate(0x2028 - 0x0b));
       __ cmp(eax, 1);
       BranchOrBacktrack(above, on_no_match);
       __ bind(&done);
@@ -668,7 +671,12 @@
 
   // Entry code:
   __ bind(&entry_label_);
-  // Start new stack frame.
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
+  // code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   __ push(ebp);
   __ mov(ebp, esp);
   // Save callee-save registers. Order here should correspond to order of
@@ -699,7 +707,7 @@
 
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(ebx);
-  __ or_(eax, Operand(eax));
+  __ or_(eax, eax);
   // If returned value is non-zero, we exit with the returned value as result.
   __ j(not_zero, &exit_label_);
 
@@ -708,13 +716,13 @@
   __ mov(ebx, Operand(ebp, kStartIndex));
 
   // Allocate space on stack for registers.
-  __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+  __ sub(esp, Immediate(num_registers_ * kPointerSize));
   // Load string length.
   __ mov(esi, Operand(ebp, kInputEnd));
   // Load input position.
   __ mov(edi, Operand(ebp, kInputStart));
   // Set up edi to be negative offset from string end.
-  __ sub(edi, Operand(esi));
+  __ sub(edi, esi);
 
   // Set eax to address of char before start of the string.
   // (effectively string position -1).
@@ -736,7 +744,7 @@
     Label init_loop;
     __ bind(&init_loop);
     __ mov(Operand(ebp, ecx, times_1, +0), eax);
-    __ sub(Operand(ecx), Immediate(kPointerSize));
+    __ sub(ecx, Immediate(kPointerSize));
     __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
     __ j(greater, &init_loop);
   }
@@ -777,12 +785,12 @@
       if (mode_ == UC16) {
         __ lea(ecx, Operand(ecx, edx, times_2, 0));
       } else {
-        __ add(ecx, Operand(edx));
+        __ add(ecx, edx);
       }
       for (int i = 0; i < num_saved_registers_; i++) {
         __ mov(eax, register_location(i));
         // Convert to index from start of string, not end.
-        __ add(eax, Operand(ecx));
+        __ add(eax, ecx);
         if (mode_ == UC16) {
           __ sar(eax, 1);  // Convert byte index to character index.
         }
@@ -819,7 +827,7 @@
     __ push(edi);
 
     CallCheckStackGuardState(ebx);
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     // If returning non-zero, we should end execution with the given
     // result as return value.
     __ j(not_zero, &exit_label_);
@@ -854,7 +862,7 @@
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     __ j(equal, &exit_with_exception);
     // Otherwise use return value as new stack pointer.
     __ mov(backtrack_stackpointer(), eax);
@@ -1133,6 +1141,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
@@ -1183,8 +1196,8 @@
 
 void RegExpMacroAssemblerIA32::SafeReturn() {
   __ pop(ebx);
-  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(ebx));
+  __ add(ebx, Immediate(masm_->CodeObject()));
+  __ jmp(ebx);
 }
 
 
@@ -1196,14 +1209,14 @@
 void RegExpMacroAssemblerIA32::Push(Register source) {
   ASSERT(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
-  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), source);
 }
 
 
 void RegExpMacroAssemblerIA32::Push(Immediate value) {
   // Notice: This updates flags, unlike normal Push.
-  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), value);
 }
 
@@ -1212,7 +1225,7 @@
   ASSERT(!target.is(backtrack_stackpointer()));
   __ mov(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
-  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ add(backtrack_stackpointer(), Immediate(kPointerSize));
 }
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index ab62764..722d718 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -66,8 +66,8 @@
     __ j(not_equal, &miss);
 
     // Jump to the first instruction in the code stub.
-    __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(Operand(extra));
+    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(extra);
 
     __ bind(&miss);
   } else {
@@ -92,8 +92,8 @@
     __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
 
     // Jump to the first instruction in the code stub.
-    __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(Operand(offset));
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
 
     // Pop at miss.
     __ bind(&miss);
@@ -107,12 +107,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                     Label* miss_label,
-                                                     Register receiver,
-                                                     String* name,
-                                                     Register r0,
-                                                     Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register r0,
+                                             Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
@@ -142,19 +142,14 @@
   __ j(not_equal, miss_label);
 
   Label done;
-  MaybeObject* result =
-      StringDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                         miss_label,
-                                                         &done,
-                                                         properties,
-                                                         name,
-                                                         r1);
-  if (result->IsFailure()) return result;
-
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     properties,
+                                                     name,
+                                                     r1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
-  return result;
 }
 
 
@@ -165,25 +160,23 @@
                               Register scratch,
                               Register extra,
                               Register extra2) {
-  Isolate* isolate = Isolate::Current();
   Label miss;
-  USE(extra2);  // The register extra2 is not used on the ia32 platform.
 
-  // Make sure that code is valid. The shifting code relies on the
-  // entry size being 8.
+  // Assert that code is valid.  The shifting code relies on the entry size
+  // being 8.
   ASSERT(sizeof(Entry) == 8);
 
-  // Make sure the flags does not name a specific type.
+  // Assert the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
 
-  // Make sure that there are no register conflicts.
+  // Assert that there are no register conflicts.
   ASSERT(!scratch.is(receiver));
   ASSERT(!scratch.is(name));
   ASSERT(!extra.is(receiver));
   ASSERT(!extra.is(name));
   ASSERT(!extra.is(scratch));
 
-  // Check scratch and extra registers are valid, and extra2 is unused.
+  // Assert scratch and extra registers are valid, and extra2 is unused.
   ASSERT(!scratch.is(no_reg));
   ASSERT(extra2.is(no_reg));
 
@@ -197,19 +190,19 @@
   __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
+  ProbeTable(isolate(), masm, flags, kPrimary, name, scratch, extra);
 
   // Primary miss: Compute hash for secondary probe.
   __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
   __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(scratch, flags);
   __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  __ sub(scratch, Operand(name));
-  __ add(Operand(scratch), Immediate(flags));
+  __ sub(scratch, name);
+  __ add(scratch, Immediate(flags));
   __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
+  ProbeTable(isolate(), masm, flags, kSecondary, name, scratch, extra);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
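
The probe math above reduces to the following (a sketch; note that the secondary hash subtracts the name's tagged pointer value, not its hash):

  #include <cstdint>

  // |mask| is (table size - 1) << kHeapObjectTagSize for the given table.
  uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map, uint32_t flags,
                         uint32_t mask) {
    return ((name_hash + map) ^ flags) & mask;
  }
  uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_ptr,
                           uint32_t flags, uint32_t mask) {
    return (primary_offset - name_ptr + flags) & mask;
  }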
@@ -228,14 +221,17 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   // Check we're still in the same context.
   __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
          masm->isolate()->global());
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(masm->isolate()->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(masm->isolate()->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
   // Load the prototype from the initial map.
@@ -318,7 +314,7 @@
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(eax, Operand(scratch1));
+  __ mov(eax, scratch1);
   __ ret(0);
 }
 
@@ -327,8 +323,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -348,12 +346,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+  __ mov(scratch, Immediate(interceptor));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -361,11 +359,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallExternalReference(
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
@@ -406,15 +405,15 @@
   //                                          frame.
   // -----------------------------------
   __ pop(scratch);
-  __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
+  __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
   __ push(scratch);
 }
 
 
 // Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
-                                        const CallOptimization& optimization,
-                                        int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
   // ----------- S t a t e -------------
   //  -- esp[0]              : return address
   //  -- esp[4]              : object passing the type check
@@ -429,30 +428,25 @@
   //  -- esp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ mov(edi, Immediate(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ mov(edi, Immediate(function));
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ mov(Operand(esp, 2 * kPointerSize), edi);
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ mov(ecx, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ mov(ecx, api_call_info);
     __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
     __ mov(Operand(esp, 3 * kPointerSize), ebx);
   } else {
-    __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(Handle<Object>(call_data)));
+    __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
   }
 
   // Prepare arguments.
   __ lea(eax, Operand(esp, 3 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiArgc = 1;  // API function gets reference to the v8::Arguments.
 
   // Allocate the v8::Arguments structure in the arguments' space since
@@ -462,7 +456,7 @@
   __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
 
   __ mov(ApiParameterOperand(1), eax);  // v8::Arguments::implicit_args_.
-  __ add(Operand(eax), Immediate(argc * kPointerSize));
+  __ add(eax, Immediate(argc * kPointerSize));
   __ mov(ApiParameterOperand(2), eax);  // v8::Arguments::values_.
   __ Set(ApiParameterOperand(3), Immediate(argc));  // v8::Arguments::length_.
   // v8::Arguments::is_construct_call_.
@@ -472,12 +466,10 @@
   __ lea(eax, ApiParameterOperand(1));
   __ mov(ApiParameterOperand(0), eax);
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm->TryCallApiFunctionAndReturn(&fun,
-                                           argc + kFastApiCallArguments + 1);
+  // Function address is a foreign pointer outside V8's heap.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ CallApiFunctionAndReturn(function_address,
+                              argc + kFastApiCallArguments + 1);
 }
 
 
@@ -486,22 +478,22 @@
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
                           Register name,
-                          Code::ExtraICState extra_ic_state)
+                          Code::ExtraICState extra_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
         name_(name),
-        extra_ic_state_(extra_ic_state) {}
+        extra_state_(extra_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -509,45 +501,27 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();  // Success.
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -556,16 +530,14 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                       interceptor_holder);
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                         lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -581,9 +553,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -596,10 +568,11 @@
 
    // Check that the maps from interceptor's holder to constant function's
    // holder haven't changed and thus we can use the cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -610,11 +583,9 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result =
-          GenerateFastApiCall(masm, optimization, arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
-      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
@@ -633,33 +604,27 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
-
-    return masm->isolate()->heap()->undefined_value();  // Success.
   }
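
The fast-API eligibility test in CompileCacheable above hinges on GetPrototypeDepthOfExpectedType: roughly, it reports how many prototype hops separate an object from the type the API signature expects, or kInvalidProtoDepth when that type is not found within a small bound. A minimal standalone sketch of that idea (the Obj type, the depth bound, and main() are illustrative assumptions, not V8 code):

#include <cstdio>

// Illustrative stand-ins; V8's real types and limits differ.
struct Obj {
  const Obj* prototype;   // next object in the prototype chain
  bool is_expected_type;  // does this object match the API signature?
};

const int kInvalidProtoDepth = -1;
const int kMaxDepth = 4;  // the fast path only tolerates shallow chains

// Count prototype hops from |obj| to an object of the expected type.
int GetPrototypeDepthOfExpectedType(const Obj* obj) {
  int depth = 0;
  for (const Obj* cur = obj; cur != nullptr && depth <= kMaxDepth;
       cur = cur->prototype, ++depth) {
    if (cur->is_expected_type) return depth;
  }
  return kInvalidProtoDepth;  // too deep or not found: no fast call
}

int main() {
  Obj holder = {nullptr, true};
  Obj receiver = {&holder, false};
  std::printf("depth = %d\n", GetPrototypeDepthOfExpectedType(&receiver));
  // Prints "depth = 1": can_do_fast_api_call would be true in this model.
}
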
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -668,27 +633,30 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
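
CompileRegular above swaps the explicit EnterInternalFrame/LeaveInternalFrame pair for a FrameScope, tying frame teardown to C++ scope so a leave can no longer be forgotten on an early exit. A minimal RAII sketch of the same pattern, assuming an illustrative Masm type rather than V8's MacroAssembler:

#include <cstdio>

// Illustrative stand-in for an assembler that emits frame setup/teardown.
struct Masm {
  void EnterFrame() { std::printf("enter frame\n"); }
  void LeaveFrame() { std::printf("leave frame\n"); }
};

// RAII scope: the destructor guarantees the matching leave is emitted.
class FrameScope {
 public:
  explicit FrameScope(Masm* masm) : masm_(masm) { masm_->EnterFrame(); }
  ~FrameScope() { masm_->LeaveFrame(); }
 private:
  Masm* masm_;
};

int main() {
  Masm masm;
  {
    FrameScope scope(&masm);
    std::printf("  ...code generated inside the frame...\n");
  }  // leave emitted here, exactly once, on every path out of the scope
}
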
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(holder);  // Save the holder.
+      __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+      // Leave the internal frame.
+    }
 
     __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
     __ j(not_equal, interceptor_succeeded);
@@ -697,38 +665,32 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
-  Code::ExtraICState extra_ic_state_;
+  Code::ExtraICState extra_state_;
 };
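
The recurring change in this class, and throughout the rest of the file, is handlification: raw JSObject*/String* parameters become Handle<JSObject>/Handle<String>, so NULL tests become is_null(), pointer equality becomes is_identical_to(), and *handle yields the raw pointer where one is still needed. A handle adds one level of indirection that the GC can update when it moves the object. A toy sketch of that calling convention (this illustrates the indirection only, not V8's actual handle machinery):

#include <cassert>

// Toy handle: one level of indirection to a slot the GC may rewrite.
template <typename T>
class Handle {
 public:
  Handle() : location_(nullptr) {}
  explicit Handle(T** location) : location_(location) {}
  bool is_null() const { return location_ == nullptr; }
  bool is_identical_to(const Handle& other) const {
    if (location_ == other.location_) return true;
    if (is_null() || other.is_null()) return false;
    return *location_ == *other.location_;
  }
  T* operator*() const { return *location_; }  // yields the raw pointer
 private:
  T** location_;  // the GC updates *location_ when the object moves
};

struct JSObject { int field; };

int main() {
  JSObject obj = {0};
  JSObject* slot = &obj;
  Handle<JSObject> a(&slot), b(&slot), none;
  assert(none.is_null());        // replaces: ptr == NULL
  assert(a.is_identical_to(b));  // replaces: a == b on raw pointers
  assert(*a == &obj);            // replaces: passing the raw pointer
}
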
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
@@ -751,12 +713,12 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ pop(scratch);  // Return address.
     __ push(receiver_reg);
-    __ push(Immediate(Handle<Map>(transition)));
+    __ push(Immediate(transition));
     __ push(eax);
     __ push(scratch);
     __ TailCallExternalReference(
@@ -767,11 +729,11 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
     __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-           Immediate(Handle<Map>(transition)));
+           Immediate(transition));
   }
 
   // Adjust for the number of properties stored in the object. Even in the
@@ -786,8 +748,12 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(eax));
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+    __ mov(name_reg, eax);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -797,8 +763,12 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(eax));
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+    __ mov(name_reg, eax);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register eax).
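
The RecordWrite calls above become RecordWriteField with an explicit kDontSaveFPRegs mode, but the barrier's purpose is unchanged: after a pointer is stored into an object's field, the slot must be reported to the GC so old-to-new pointers land in the store buffer and the incremental marker can see them. A deliberately simplified sketch of a generational write barrier (the types and single-buffer design are illustrative assumptions, far simpler than the generated code):

#include <cstdio>
#include <vector>

// Illustrative two-generation model: objects are in new or old space.
struct HeapObject { bool in_new_space; };

// Slots the next scavenge must revisit (the remembered set).
static std::vector<HeapObject**> store_buffer;

// Store |value| into a field of |host| and record the slot whenever an
// old-space object ends up pointing at a new-space object.
void WriteField(HeapObject* host, HeapObject** slot, HeapObject* value) {
  *slot = value;
  if (!host->in_new_space && value->in_new_space) {
    store_buffer.push_back(slot);  // remembered-set entry for the GC
  }
}

int main() {
  HeapObject old_obj = {false};
  HeapObject young_obj = {true};
  HeapObject* field = nullptr;
  WriteField(&old_obj, &field, &young_obj);
  std::printf("remembered slots: %zu\n", store_buffer.size());  // prints 1
}
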
@@ -809,70 +779,58 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
+  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
   if (Serializer::enabled()) {
-    __ mov(scratch, Immediate(Handle<Object>(cell)));
+    __ mov(scratch, Immediate(cell));
     __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-           Immediate(masm->isolate()->factory()->the_hole_value()));
+           Immediate(the_hole));
   } else {
-    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
-           Immediate(masm->isolate()->factory()->the_hole_value()));
+    __ cmp(Operand::Cell(cell), Immediate(the_hole));
   }
   __ j(not_equal, miss);
-  return cell;
 }
 
 
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
     if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = GenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
     }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
   }
-  return NULL;
 }
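
GenerateCheckPropertyCells enforces a negative-lookup invariant: every global object skipped between the receiver and the holder must still lack the property, which V8 represents by the property's cell holding the hole sentinel. A standalone model of the invariant being checked, with illustrative Cell/GlobalObject types:

#include <cassert>
#include <map>
#include <string>

// Illustrative model: a global object keeps one cell per property name,
// and an "empty" cell holds a distinguished hole sentinel.
struct Cell { const void* value; };
static const int kHoleMarker = 0;
static const void* const kTheHole = &kHoleMarker;

struct GlobalObject {
  std::map<std::string, Cell> cells;
  // Create the cell at compile time if none exists for the property yet.
  Cell* EnsurePropertyCell(const std::string& name) {
    std::map<std::string, Cell>::iterator it = cells.find(name);
    if (it == cells.end()) {
      Cell empty = {kTheHole};
      it = cells.insert(std::make_pair(name, empty)).first;
    }
    return &it->second;
  }
};

// The check the stub emits, in C++: the stub must miss as soon as any
// skipped global object has acquired the property.
bool CellStillEmpty(GlobalObject* global, const std::string& name) {
  return global->EnsurePropertyCell(name)->value == kTheHole;
}

int main() {
  GlobalObject global;
  assert(CellStillEmpty(&global, "x"));        // still absent: stub stays valid
  int v = 42;
  global.EnsurePropertyCell("x")->value = &v;  // the property appears
  assert(!CellStillEmpty(&global, "x"));       // now the stub must miss
}
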
 
-
 #undef __
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -882,7 +840,7 @@
 
   // Keep track of the current object in register reg.
   Register reg = object_reg;
-  JSObject* current = object;
+  Handle<JSObject> current = object;
   int depth = 0;
 
   if (save_at_depth == depth) {
@@ -891,79 +849,58 @@
 
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
-  while (current != holder) {
-    depth++;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Check the map of the current object.
-      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-             Immediate(Handle<Map>(current->map())));
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      Handle<Map> current_map(current->map());
+      if (in_new_space) {
+        // Save the map in scratch1 for later.
+        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+        __ cmp(scratch1, Immediate(current_map));
+      } else {
+        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+               Immediate(current_map));
+      }
       // Branch on the result of the map check.
       __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ mov(reg, Handle<JSObject>(prototype));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (in_new_space) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ mov(reg, prototype);
+      }
     }
 
     if (save_at_depth == depth) {
@@ -973,7 +910,7 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }
-  ASSERT(current == holder);
+  ASSERT(current.is_identical_to(holder));
 
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -987,40 +924,33 @@
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  };
+  }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  MaybeObject* result = GenerateCheckPropertyCells(masm(),
-                                                   object,
-                                                   holder,
-                                                   name,
-                                                   scratch1,
-                                                   miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
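
CheckPrototypes compiles one map comparison per link of the chain: each object's map (its hidden class) is compared against the map recorded at compile time, and any mismatch jumps to the miss label. Prototypes in new space cannot be embedded in code, so they are reloaded from the map instead of being baked in. A simplified model of the per-link check, with illustrative Map/JSObj types:

#include <cstdio>

// Illustrative model of the per-link check CheckPrototypes emits.
struct JSObj;
struct Map { const JSObj* prototype; };
struct JSObj { const Map* map; };

// Returns true when every object between |object| and |holder| still has
// the map recorded at compile time; the real stub jumps to |miss| instead.
bool MapsUnchanged(const JSObj* object, const JSObj* holder,
                   const Map* const* expected_maps) {
  int depth = 0;
  for (const JSObj* cur = object; cur != holder;
       cur = cur->map->prototype, ++depth) {
    if (cur->map != expected_maps[depth]) return false;  // map changed: miss
  }
  return true;
}

int main() {
  Map holder_map = {nullptr};
  JSObj holder = {&holder_map};
  Map obj_map = {&holder};
  JSObj obj = {&obj_map};
  const Map* expected[] = {&obj_map};
  std::printf("maps ok: %d\n", MapsUnchanged(&obj, &holder, expected));
}
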
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
-  Register reg =
-      CheckPrototypes(object, receiver, holder,
-                      scratch1, scratch2, scratch3, name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
@@ -1028,40 +958,37 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1,
-                      scratch2, scratch3, name, miss);
-
-  Handle<AccessorInfo> callback_handle(callback);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch3.is(reg));
   __ pop(scratch3);  // Get return address to place it below.
 
   __ push(receiver);  // receiver
-  __ mov(scratch2, Operand(esp));
+  __ mov(scratch2, esp);
   ASSERT(!scratch2.is(reg));
   __ push(reg);  // holder
   // Push data from AccessorInfo.
-  if (isolate()->heap()->InNewSpace(callback_handle->data())) {
-    __ mov(scratch1, Immediate(callback_handle));
+  if (isolate()->heap()->InNewSpace(callback->data())) {
+    __ mov(scratch1, Immediate(callback));
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
   } else {
-    __ push(Immediate(Handle<Object>(callback_handle->data())));
+    __ push(Immediate(Handle<Object>(callback->data())));
   }
 
   // Save a pointer to where we pushed the arguments pointer.
@@ -1073,10 +1000,6 @@
 
   __ push(scratch3);  // Restore return address.
 
-  // Do call through the api.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
  // 3 elements array for v8::Arguments::values_, handler for name and pointer
  // to the values (it is considered as a smi by the GC).
   const int kStackSpace = 5;
@@ -1084,48 +1007,49 @@
 
   __ PrepareCallApiFunction(kApiArgc);
   __ mov(ApiParameterOperand(0), ebx);  // name.
-  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ add(ebx, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), ebx);  // arguments pointer.
 
   // Emitting a stub call may try to allocate (if the code is not
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<Object> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, receiver, holder,
-                  scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ mov(eax, Handle<Object>(value));
+  __ mov(eax, value);
   __ ret(0);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1141,9 +1065,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1158,47 +1082,49 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ push(receiver);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ push(receiver);
+      }
+      __ push(holder_reg);
+      __ push(name_reg);
+
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see a caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ cmp(eax, factory()->no_interceptor_result_sentinel());
+      __ j(equal, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ ret(0);
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
-    __ push(holder_reg);
-    __ push(name_reg);
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ cmp(eax, factory()->no_interceptor_result_sentinel());
-    __ j(equal, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ ret(0);
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
    // haven't changed, and load lookup's holder into holder_reg.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1210,15 +1136,15 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), eax, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1227,7 +1153,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+      __ mov(holder_reg, Immediate(callback));
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1257,17 +1183,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+    __ cmp(ecx, Immediate(name));
     __ j(not_equal, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1280,7 +1206,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(edx, miss);
   }
 
@@ -1289,19 +1215,20 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
   if (Serializer::enabled()) {
-    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, Immediate(cell));
     __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, Operand::Cell(cell));
   }
 
   // Check that the cell contains the same function.
-  if (isolate()->heap()->InNewSpace(function)) {
+  if (isolate()->heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1314,31 +1241,26 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
-    __ j(not_equal, miss);
   } else {
-    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
-    __ j(not_equal, miss);
+    __ cmp(edi, Immediate(function));
   }
+  __ j(not_equal, miss);
 }
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ jmp(code, RelocInfo::CODE_TARGET);
 }
 
 
-MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
-    JSObject* object,
-    JSObject* holder,
-    int index,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
+                                                int index,
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1376,7 +1298,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -1384,19 +1306,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1406,8 +1328,8 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) {
-    return isolate()->heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   Label miss;
@@ -1421,9 +1343,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
 
-  CheckPrototypes(JSObject::cast(object), edx,
-                  holder, ebx,
-                  eax, edi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1441,21 +1362,25 @@
     __ j(not_equal, &call_builtin);
 
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier;
 
       // Get the array's length into eax and calculate new length.
       __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
       STATIC_ASSERT(kSmiTagSize == 1);
       STATIC_ASSERT(kSmiTag == 0);
-      __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
+      __ add(eax, Immediate(Smi::FromInt(argc)));
 
       // Get the element's length into ecx.
       __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
-      __ cmp(eax, Operand(ecx));
+      __ cmp(eax, ecx);
       __ j(greater, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
+      __ JumpIfNotSmi(ecx, &with_write_barrier);
+
       // Save new length.
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
@@ -1463,20 +1388,27 @@
       __ lea(edx, FieldOperand(ebx,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
       __ mov(Operand(edx, 0), ecx);
 
-      // Check if value is a smi.
-      __ JumpIfNotSmi(ecx, &with_write_barrier);
-
-      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ InNewSpace(ebx, ecx, equal, &exit);
+      __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(edi, &call_builtin);
 
-      __ RecordWriteHelper(ebx, edx, ecx);
+      // Save new length.
+      __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+      // Push the element.
+      __ lea(edx, FieldOperand(ebx,
+                               eax, times_half_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ mov(Operand(edx, 0), ecx);
+
+      __ RecordWrite(ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
+
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1484,6 +1416,19 @@
         __ jmp(&call_builtin);
       }
 
+      __ mov(edi, Operand(esp, argc * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(edi, &no_fast_elements_check);
+      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
+      __ bind(&no_fast_elements_check);
+
+      // We could be lucky and the elements array could be at the top of
+      // new-space.  In this case we can just grow it in place by moving the
+      // allocation pointer up.
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1497,33 +1442,43 @@
       __ lea(edx, FieldOperand(ebx,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
-      __ cmp(edx, Operand(ecx));
+      __ cmp(edx, ecx);
       __ j(not_equal, &call_builtin);
-      __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
+      __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
       __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
       __ j(above, &call_builtin);
 
       // We fit and could grow elements.
       __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
 
       // Push the argument...
-      __ mov(Operand(edx, 0), ecx);
+      __ mov(Operand(edx, 0), edi);
       // ... and fill the rest with holes.
       for (int i = 1; i < kAllocationDelta; i++) {
         __ mov(Operand(edx, i * kPointerSize),
                Immediate(factory()->the_hole_value()));
       }
 
+      // We know the elements array is in new space so we don't need the
+      // remembered set, but we just pushed a value onto it so we may have to
+      // tell the incremental marker to rescan the object that we just grew.  We
+      // don't need to worry about the holes because they are in old space and
+      // already marked black.
+      __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
       // Restore receiver to edx as finish sequence assumes it's here.
       __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
       // Increment element's and array's sizes.
       __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
              Immediate(Smi::FromInt(kAllocationDelta)));
+
+      // NOTE: This only happens in new-space, where we don't
+      // care about the black-byte-count on pages. Otherwise we should
+      // update that too if the object is black.
+
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1535,19 +1490,19 @@
   }
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
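
The grow-in-place path in CompileArrayPushCall above ("We could be lucky...") works because new space is bump-allocated: when the elements array is the most recently allocated object, the stub can extend it simply by advancing the allocation top, provided the limit is not crossed. A toy bump allocator showing the same two tests (arena size and names are illustrative):

#include <cstdio>

// Toy bump allocator standing in for new space.
static char arena[1024];
static char* allocation_top = arena;
static char* const allocation_limit = arena + sizeof(arena);

// Extend |obj| (currently |size| bytes) in place by |delta| bytes, which
// is only possible when it is the most recently allocated object.
bool TryGrowInPlace(char* obj, int size, int delta) {
  if (obj + size != allocation_top) return false;               // not at top
  if (allocation_top + delta > allocation_limit) return false;  // no room
  allocation_top += delta;  // the same move the stub makes on the top pointer
  return true;
}

int main() {
  char* elements = allocation_top;
  allocation_top += 64;  // "allocate" a 64-byte elements array
  std::printf("grew in place: %d\n", TryGrowInPlace(elements, 64, 16));
}
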
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1557,8 +1512,8 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) {
-    return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   Label miss, return_undefined, call_builtin;
@@ -1571,9 +1526,8 @@
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
-  CheckPrototypes(JSObject::cast(object), edx,
-                  holder, ebx,
-                  eax, edi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, &miss);
 
   // Get the elements array of the object.
   __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1585,7 +1539,7 @@
 
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
-  __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+  __ sub(ecx, Immediate(Smi::FromInt(1)));
   __ j(negative, &return_undefined);
 
   // Get the last element.
@@ -1594,7 +1548,7 @@
   __ mov(eax, FieldOperand(ebx,
                            ecx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
-  __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
+  __ cmp(eax, Immediate(factory()->the_hole_value()));
   __ j(equal, &call_builtin);
 
   // Set the array's length.
@@ -1618,20 +1572,19 @@
       1);
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1641,8 +1594,8 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) {
-    return isolate()->heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) {
+    return Handle<Code>::null();
   }
 
   const int argc = arguments().immediate();
@@ -1653,7 +1606,7 @@
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
@@ -1665,12 +1618,95 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             eax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, edi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  eax, holder, ebx, edx, edi, name, &miss);
 
   Register receiver = ebx;
   Register index = edi;
+  Register result = eax;
+  __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+  if (argc > 0) {
+    __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+  } else {
+    __ Set(index, Immediate(factory()->undefined_value()));
+  }
+
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
+  __ ret((argc + 1) * kPointerSize);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ Set(eax, Immediate(factory()->nan_value()));
+    __ ret((argc + 1) * kPointerSize);
+  }
+
+  __ bind(&miss);
+  // Restore function name in ecx.
+  __ Set(ecx, Immediate(name));
+  __ bind(&name_miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(function);
+}
+
+
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : function name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || !cell.is_null()) {
+    return Handle<Code>::null();
+  }
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            eax,
+                                            &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  eax, holder, ebx, edx, edi, name, &miss);
+
+  Register receiver = eax;
+  Register index = edi;
   Register scratch = edx;
   Register result = eax;
   __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
@@ -1680,107 +1716,19 @@
     __ Set(index, Immediate(factory()->undefined_value()));
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ Set(eax, Immediate(factory()->nan_value()));
-    __ ret((argc + 1) * kPointerSize);
-  }
-
-  __ bind(&miss);
-  // Restore function name in ecx.
-  __ Set(ecx, Immediate(Handle<String>(name)));
-  __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : function name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) {
-    return heap()->undefined_value();
-  }
-
-  const int argc = arguments().immediate();
-
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            eax,
-                                            &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, edi, name, &miss);
-
-  Register receiver = eax;
-  Register index = edi;
-  Register scratch1 = ebx;
-  Register scratch2 = edx;
-  Register result = eax;
-  __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
-  if (argc > 0) {
-    __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
-  } else {
-    __ Set(index, Immediate(factory()->undefined_value()));
-  }
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
-  __ ret((argc + 1) * kPointerSize);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1790,22 +1738,21 @@
 
   __ bind(&miss);
   // Restore function name in ecx.
-  __ Set(ecx, Immediate(Handle<String>(name)));
+  __ Set(ecx, Immediate(name));
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1819,23 +1766,22 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1851,17 +1797,17 @@
   // Convert the smi code to uint16.
   __ and_(code, Immediate(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, eax);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, eax);
+  generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1869,19 +1815,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
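
The custom-call compilers in this hunk all switch from returning isolate()->heap()->undefined_value() to returning Handle<Code>::null() when they cannot emit a specialized stub. A minimal standalone sketch of that bail-out convention; the Handle class below is a hypothetical stand-in, not V8's real one:

#include <cassert>
#include <cstdio>

template <typename T>
class Handle {
 public:
  Handle() : location_(NULL) {}
  explicit Handle(T** location) : location_(location) {}
  static Handle<T> null() { return Handle<T>(); }
  bool is_null() const { return location_ == NULL; }
  T* operator*() const { assert(!is_null()); return *location_; }
 private:
  T** location_;
};

struct Code { int id; };

// Mirrors the CompileCustomCall callers: an empty handle now means "no
// specialized stub was produced", where undefined_value() was used before.
static Handle<Code> TryCompileCustomCall(bool supported, Code** slot) {
  if (!supported) return Handle<Code>::null();
  return Handle<Code>(slot);
}

int main() {
  Code stub = { 42 };
  Code* slot = &stub;
  Handle<Code> code = TryCompileCustomCall(true, &slot);
  if (!code.is_null()) std::printf("custom stub %d\n", (*code)->id);
  Handle<Code> none = TryCompileCustomCall(false, &slot);
  std::printf("bail out to regular compiler: %d\n", none.is_null() ? 1 : 0);
  return 0;
}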
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1891,7 +1837,7 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(SSE2)) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   CpuFeatures::Scope use_sse2(SSE2);
@@ -1901,23 +1847,24 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1998,19 +1945,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2024,23 +1971,24 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return isolate()->heap()->undefined_value();
+    return Handle<Code>::null();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2058,10 +2006,10 @@
   __ sar(ebx, kBitsPerInt - 1);
 
   // Do bitwise not or do nothing depending on ebx.
-  __ xor_(eax, Operand(ebx));
+  __ xor_(eax, ebx);
 
   // Add 1 or do nothing depending on ebx.
-  __ sub(eax, Operand(ebx));
+  __ sub(eax, ebx);
 
   // If the result is still negative, go to the slow case.
   // This only happens for the most negative smi.
@@ -2102,30 +2050,29 @@
 
   __ bind(&miss);
   // ecx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to the global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
 
@@ -2144,11 +2091,11 @@
 
   // Allocate space for v8::Arguments implicit values. Must be initialized
   // before calling any runtime function.
-  __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+  __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), edx, holder,
-                  ebx, eax, edi, name, depth, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
+                  name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ mov(eax, Operand(esp, 3 * kPointerSize));
@@ -2156,27 +2103,24 @@
 
   // esp[2 * kPointerSize] is uninitialized; esp[3 * kPointerSize] contains
   // a duplicate of the return address and will be overwritten.
-  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiCall(masm(), optimization, argc);
 
   __ bind(&miss);
-  __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+  __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
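
CompileFastApiCall only emits the fast path when the expected receiver type is found within a bounded prototype depth; any other shape returns the null handle. A rough sketch of that gate, with invented types and an assumed kInvalidProtoDepth sentinel:

#include <cstdio>

static const int kInvalidProtoDepth = -1;  // assumed sentinel value

struct ProtoLink {
  const char* type_name;
  const ProtoLink* next;
};

static int DepthOfExpectedType(const ProtoLink* object, const char* expected) {
  int depth = 0;
  for (const ProtoLink* p = object; p != NULL; p = p->next, ++depth) {
    if (p->type_name == expected) return depth;  // identity compare suffices
  }
  return kInvalidProtoDepth;
}

int main() {
  const char* kApiType = "ApiObject";
  ProtoLink api = { kApiType, NULL };
  ProtoLink child = { "Child", &api };
  std::printf("depth = %d\n", DepthOfExpectedType(&child, kApiType));  // 1
  std::printf("depth = %d\n", DepthOfExpectedType(&api, "Other"));     // -1
  return 0;
}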
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(
-    Object* object,
-    JSObject* holder,
-    JSFunction* function,
-    String* name,
-    CheckType check) {
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
+                                                   CheckType check) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2186,16 +2130,14 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2210,15 +2152,13 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(isolate()->counters()->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, edi, name, &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
+                      edi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2229,28 +2169,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a string or a symbol.
         __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(edx, &fast);
@@ -2260,18 +2197,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ cmp(edx, factory()->true_value());
@@ -2282,17 +2219,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, edi, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            eax, holder, ebx, edx, edi, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2300,17 +2238,16 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
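
The renamed extra_state_ is still unpacked with BitField-style accessors such as CallICBase::Contextual::decode to choose between CALL_AS_FUNCTION and CALL_AS_METHOD. A sketch of the encoding pattern follows; the bit layout is assumed for illustration and is not V8's actual assignment:

#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned value) {
    return static_cast<T>((value >> shift) & ((1u << size) - 1));
  }
};

enum CallKind { CALL_AS_METHOD, CALL_AS_FUNCTION };
typedef BitField<bool, 0, 1> Contextual;  // assumed bit position

int main() {
  unsigned extra_state = Contextual::encode(true);
  CallKind kind =
      Contextual::decode(extra_state) ? CALL_AS_FUNCTION : CALL_AS_METHOD;
  std::printf("%s\n", kind == CALL_AS_FUNCTION ? "function" : "method");
  return 0;
}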
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2325,24 +2262,15 @@
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         edx,
-                                         ebx,
-                                         edi,
-                                         eax,
-                                         &miss);
-  if (result->IsFailure()) return result;
+  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
+                   &miss);
 
   // Restore receiver.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2361,7 +2289,7 @@
 
   // Invoke the function.
   __ mov(edi, eax);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -2369,20 +2297,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(
-    JSObject* object,
-    GlobalObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2392,23 +2319,17 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2423,40 +2344,31 @@
   // Jump to the cached code (tail call).
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1);
-  ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-                  expected, arguments(), JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  } else {
-    Handle<Code> code(function->code());
-    __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+                expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2466,27 +2378,23 @@
   Label miss;
 
   // Generate store field code.  Trashes the name register.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     edx, ecx, ebx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
-  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
+  __ mov(ecx, Immediate(name));  // restore name
   Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2514,7 +2422,7 @@
 
   __ pop(ebx);  // remove the return address
   __ push(edx);  // receiver
-  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback info
+  __ push(Immediate(callback));  // callback info
   __ push(ecx);  // name
   __ push(eax);  // value
   __ push(ebx);  // restore return address
@@ -2534,8 +2442,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2583,9 +2492,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2599,13 +2509,9 @@
          Immediate(Handle<Map>(object->map())));
   __ j(not_equal, &miss);
 
-
   // Compute the cell operand to use.
-  Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
-  if (Serializer::enabled()) {
-    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-    cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
-  }
+  __ mov(ebx, Immediate(cell));
+  Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
 
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
@@ -2616,6 +2522,7 @@
 
   // Store the value in the cell.
   __ mov(cell_operand, eax);
+  // No write barrier here, because cells are always rescanned.
 
   // Return the value (register eax).
   Counters* counters = isolate()->counters();
@@ -2633,10 +2540,10 @@
 }
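
CompileStoreGlobal now always stores through the cell held in ebx, and the new comment notes that no write barrier is needed. A toy model, with invented types, of why a space the collector rescans on every collection needs no remembered-set entries:

#include <cstdio>
#include <set>

struct Object { bool in_new_space; };

static std::set<Object**> remembered_set;  // slots the GC must revisit

static void WriteWithBarrier(Object** slot, Object* value, bool host_is_old) {
  *slot = value;
  if (host_is_old && value->in_new_space) {
    remembered_set.insert(slot);  // record the old-to-new pointer
  }
}

static void WriteToAlwaysRescannedSpace(Object** slot, Object* value) {
  *slot = value;  // no barrier: the collector rescans this space anyway
}

int main() {
  Object young = { true };
  Object* ordinary_field = NULL;
  Object* cell_value = NULL;
  WriteWithBarrier(&ordinary_field, &young, true);
  WriteToAlwaysRescannedSpace(&cell_value, &young);
  std::printf("remembered slots: %d\n", (int)remembered_set.size());  // 1
  return 0;
}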
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2649,16 +2556,11 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
   // Generate store field code.  Trashes the name register.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     edx, ecx, ebx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2667,39 +2569,37 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(edx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_jsarray, elements_kind).GetCode();
+
+  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2707,28 +2607,33 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(edx, &miss);
-
-  Register map_reg = ebx;
-  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
-  int receiver_count = receiver_maps->length();
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    __ cmp(map_reg, map);
-    __ j(equal, Handle<Code>(handler_ics->at(current)));
+  __ JumpIfSmi(edx, &miss, Label::kNear);
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+  // edi: receiver->map().
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    __ cmp(edi, receiver_maps->at(i));
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i));
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ mov(ebx, Immediate(transitioned_maps->at(i)));
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
   __ bind(&miss);
   Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
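
CompileStorePolymorphic pairs each receiver map with an optional transition map: on a map match it either tail-jumps straight to the handler or first materializes the transition target (the mov into ebx) so the handler can migrate the elements kind. An illustrative C++ rendering of that dispatch loop, not the generated code:

#include <cstdio>
#include <vector>

struct Map { int id; };
typedef void (*Handler)(Map** receiver_map_slot, Map* transition_target);

static void StoreFast(Map**, Map*) { std::printf("fast store\n"); }

static void StoreWithTransition(Map** slot, Map* target) {
  *slot = target;  // like the mov of the transitioned map into ebx
  std::printf("store after transition to map %d\n", target->id);
}

static void Dispatch(Map** receiver_map_slot,
                     const std::vector<Map*>& maps,
                     const std::vector<Map*>& transitions,
                     const std::vector<Handler>& handlers) {
  for (size_t i = 0; i < maps.size(); ++i) {
    if (*receiver_map_slot != maps[i]) continue;  // cmp edi, map / j not_equal
    handlers[i](receiver_map_slot, transitions[i]);
    return;
  }
  std::printf("miss: jump to generic KeyedStoreIC\n");
}

int main() {
  Map smi_only = { 1 };
  Map fast = { 2 };
  Map* receiver_map = &smi_only;
  std::vector<Map*> maps;
  std::vector<Map*> transitions;
  std::vector<Handler> handlers;
  maps.push_back(&smi_only); transitions.push_back(&fast);
  handlers.push_back(StoreWithTransition);
  maps.push_back(&fast); transitions.push_back(NULL);
  handlers.push_back(StoreFast);
  Dispatch(&receiver_map, maps, transitions, handlers);
  return 0;
}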
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2749,15 +2654,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  edx,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2769,14 +2667,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, isolate()->heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2793,10 +2691,11 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2804,13 +2703,8 @@
   // -----------------------------------
   Label miss;
 
-  MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
-                                             edi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
+                       name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2819,10 +2713,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<Object> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2839,9 +2733,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2849,21 +2743,13 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          eax,
-                          ecx,
-                          edx,
-                          ebx,
-                          edi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
+                          name, &miss);
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2873,11 +2759,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2888,7 +2775,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual loads. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(eax, &miss);
   }
 
@@ -2897,10 +2784,10 @@
 
   // Get the value from the cell.
   if (Serializer::enabled()) {
-    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, Immediate(cell));
     __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, Operand::Cell(cell));
   }
 
   // Check for deleted property if property can actually be deleted.
@@ -2926,9 +2813,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- eax    : key
@@ -2941,7 +2828,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -2955,11 +2842,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -2971,18 +2858,13 @@
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
-                                             ecx, edi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
+  GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
+                       name, &miss);
 
   __ bind(&miss);
-
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2991,10 +2873,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<Object> value) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3006,11 +2889,11 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
-                       value, name, &miss);
+  GenerateLoadConstant(
+      receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_constant_function(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3020,9 +2903,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3034,21 +2918,13 @@
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          edx,
-                          eax,
-                          ecx,
-                          ebx,
-                          edi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
+                          name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3058,7 +2934,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3070,7 +2947,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), edx, ecx, &miss);
@@ -3083,7 +2960,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3095,7 +2973,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
@@ -3108,7 +2986,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3120,7 +2999,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
@@ -3133,31 +3012,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Code* stub;
+
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(edx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3170,22 +3047,22 @@
   __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    __ cmp(map_reg, map);
-    __ j(equal, Handle<Code>(handler_ics->at(current)));
+    __ cmp(map_reg, receiver_maps->at(current));
+    __ j(equal, handler_ics->at(current));
   }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- eax : argc
   //  -- edi : constructor
@@ -3224,12 +3101,8 @@
   // ebx: initial map
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
   __ shl(ecx, kPointerSizeLog2);
-  __ AllocateInNewSpace(ecx,
-                        edx,
-                        ecx,
-                        no_reg,
-                        &generic_stub_call,
-                        NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
+                        &generic_stub_call, NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields and add the heap tag.
   // ebx: initial map
@@ -3260,7 +3133,7 @@
   // edi: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3298,7 +3171,7 @@
   // Move argc to ebx and retrieve and tag the JSObject to return.
   __ mov(ebx, eax);
   __ pop(eax);
-  __ or_(Operand(eax), Immediate(kHeapObjectTag));
+  __ or_(eax, Immediate(kHeapObjectTag));
 
   // Remove caller arguments and receiver from the stack and return.
   __ pop(ecx);
@@ -3312,9 +3185,8 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Handle<Code> generic_construct_stub =
-      isolate()->builtins()->JSConstructStubGeneric();
-  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+  __ jmp(code, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
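
The construct stub specializes constructors whose bodies contain only this.x = ...; assignments, so the field fills are known at compile time and come either from a passed argument or from a constant. A toy host-side illustration of that fill logic, with invented structures:

#include <cstdio>
#include <vector>

struct Assignment {
  bool from_argument;  // this.x = arg_i  versus  this.x = constant
  int arg_index;
  int constant;
};

static std::vector<int> ConstructFields(const std::vector<Assignment>& body,
                                        const std::vector<int>& args,
                                        int undefined_marker) {
  std::vector<int> fields;
  for (size_t i = 0; i < body.size(); ++i) {
    const Assignment& a = body[i];
    if (a.from_argument) {
      // Like the stub: check whether the argument was actually passed.
      fields.push_back(a.arg_index < static_cast<int>(args.size())
                           ? args[a.arg_index]
                           : undefined_marker);
    } else {
      fields.push_back(a.constant);
    }
  }
  return fields;
}

int main() {
  // function Point(x, y) { this.x = x; this.y = y; this.z = 0; }
  std::vector<Assignment> body;
  Assignment ax = { true, 0, 0 };  body.push_back(ax);
  Assignment ay = { true, 1, 0 };  body.push_back(ay);
  Assignment az = { false, 0, 0 }; body.push_back(az);
  std::vector<int> args(1, 3);  // called as new Point(3)
  std::vector<int> fields = ConstructFields(body, args, -1);
  for (size_t i = 0; i < fields.size(); ++i) std::printf("%d ", fields[i]);
  std::printf("\n");  // prints: 3 -1 0
  return 0;
}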
@@ -3506,8 +3378,7 @@
   // If we fail allocation of the HeapNumber, we still have a value on
   // top of the FPU stack. Remove it.
   __ bind(&failed_allocation);
-  __ ffree();
-  __ fincstp();
+  __ fstp(0);
   // Fall through to slow case.
 
   // Slow case: Jump to runtime.
@@ -3679,10 +3550,10 @@
             // If the value is NaN or +/-infinity, the result is 0x80000000,
             // which is automatically zero when taken mod 2^n, n < 32.
             __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-            __ sub(Operand(esp), Immediate(2 * kPointerSize));
+            __ sub(esp, Immediate(2 * kPointerSize));
             __ fisttp_d(Operand(esp, 0));
             __ pop(ebx);
-            __ add(Operand(esp), Immediate(kPointerSize));
+            __ add(esp, Immediate(kPointerSize));
           } else {
             ASSERT(CpuFeatures::IsSupported(SSE2));
             CpuFeatures::Scope scope(SSE2);
@@ -3824,8 +3695,7 @@
   // A value was pushed on the floating point stack before the allocation, if
   // the allocation fails it needs to be removed.
   if (!CpuFeatures::IsSupported(SSE2)) {
-    __ ffree();
-    __ fincstp();
+    __ fstp(0);
   }
   Handle<Code> slow_ic =
       masm->isolate()->builtins()->KeyedLoadIC_Slow();
@@ -3838,15 +3708,17 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -3870,11 +3742,28 @@
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ mov(edx, Operand(eax));
-  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
-  __ RecordWrite(edi, 0, edx, ecx);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(eax, &transition_elements_kind);
+    // ecx is a smi; use times_half_pointer_size instead of
+    // times_pointer_size.
+    __ mov(FieldOperand(edi,
+                        ecx,
+                        times_half_pointer_size,
+                        FixedArray::kHeaderSize), eax);
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    // Do the store and update the write barrier.
+    // ecx is a smi; use times_half_pointer_size instead of
+    // times_pointer_size.
+    __ lea(ecx, FieldOperand(edi,
+                             ecx,
+                             times_half_pointer_size,
+                             FixedArray::kHeaderSize));
+    __ mov(Operand(ecx, 0), eax);
+    // Make sure to preserve the value in register eax.
+    __ mov(edx, eax);
+    __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3884,6 +3773,11 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  // Handle transition to other elements kinds without using the generic stub.
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
 }
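
For FAST_SMI_ONLY_ELEMENTS the store above skips the write barrier entirely, because a smi carries no heap pointer; a non-smi value instead branches to transition_elements_kind. A simplified C++ model of that fast path (the tagging scheme is the usual low-bit convention, assumed here):

#include <cstdio>

typedef long Tagged;                   // stand-in for a tagged machine word
static const Tagged kSmiTagMask = 1;   // assumed: low bit 0 means smi

static bool IsSmi(Tagged value) { return (value & kSmiTagMask) == 0; }

enum StoreResult { STORED, TRANSITION };

static StoreResult StoreSmiOnlyElement(Tagged* backing, int index,
                                       Tagged value) {
  if (!IsSmi(value)) return TRANSITION;  // JumpIfNotSmi -> transition label
  backing[index] = value;                // plain store, no RecordWrite
  return STORED;
}

int main() {
  Tagged elements[4] = { 0, 0, 0, 0 };
  Tagged smi_seven = 7 << 1;    // smi-tagged 7
  Tagged heap_object = 0x1001;  // odd: tagged heap pointer, not a smi
  std::printf("%d\n", StoreSmiOnlyElement(elements, 0, smi_seven));    // 0
  std::printf("%d\n", StoreSmiOnlyElement(elements, 1, heap_object));  // 1
  return 0;
}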
 
 
@@ -3896,8 +3790,7 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic, transition_elements_kind;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -3918,59 +3811,13 @@
   }
   __ j(above_equal, &miss_force_generic);
 
-  __ JumpIfSmi(eax, &smi_value, Label::kNear);
-
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-    __ bind(&have_double_value);
-    __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
-              xmm0);
-    __ ret(0);
-  } else {
-    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ bind(&have_double_value);
-    __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
-    __ ret(0);
-  }
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
-  } else {
-    __ fld_d(Operand::StaticVariable(canonical_nan_reference));
-  }
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ mov(edx, eax);
-  __ SmiUntag(edx);
-  __ push(edx);
-  __ fild_s(Operand(esp, 0));
-  __ pop(edx);
-  __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+  __ StoreNumberToDoubleElements(eax,
+                                 edi,
+                                 ecx,
+                                 edx,
+                                 xmm0,
+                                 &transition_elements_kind,
+                                 true);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3978,6 +3825,11 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  // Handle transition to other elements kinds without using the generic stub.
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
 }
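
The open-coded double store is replaced by the StoreNumberToDoubleElements macro, which among other things canonicalizes NaNs so arbitrary user NaN payloads can never alias the runtime's hole sentinel. A host-side sketch of just the canonicalization step; the constant is assumed to be the usual quiet-NaN pattern:

#include <cstdio>
#include <cstring>

// Canonical quiet NaN bit pattern (assumed; matches the common x86 qNaN).
static const unsigned long long kCanonicalNaN = 0x7FF8000000000000ULL;

static unsigned long long CanonicalizedBits(double value) {
  unsigned long long bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return (value != value) ? kCanonicalNaN : bits;  // NaN compares unequal
}

int main() {
  double weird_nan;
  unsigned long long payload = 0x7FF800000000BEEFULL;  // NaN with a payload
  std::memcpy(&weird_nan, &payload, sizeof(weird_nan));
  std::printf("%016llx\n", CanonicalizedBits(weird_nan));  // canonical NaN
  std::printf("%016llx\n", CanonicalizedBits(1.5));        // unchanged
  return 0;
}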
 
 
diff --git a/src/ic-inl.h b/src/ic-inl.h
index b4f789c..498cf3a 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -87,6 +87,8 @@
   }
 #endif
   Assembler::set_target_address_at(address, target->instruction_start());
+  target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
+                                                                  target);
 }
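
The new RecordCodeTargetPatch call exists because patching an IC writes a code pointer into an object the incremental marker may already have scanned. A toy tri-color model, with invented types, of the invariant being protected:

#include <cstdio>

enum Color { WHITE, GREY, BLACK };

struct HeapObject {
  Color color;
  HeapObject* edge;
};

// Barrier run after patching: if a fully scanned (black) host gains an
// edge to an unmarked (white) object mid-cycle, re-grey the target so
// the incremental marker visits it before marking finishes.
static void RecordPatch(HeapObject* host, HeapObject* target) {
  if (host->color == BLACK && target->color == WHITE) {
    target->color = GREY;
  }
}

int main() {
  HeapObject code_with_ic = { BLACK, NULL };  // already scanned this cycle
  HeapObject new_stub = { WHITE, NULL };
  code_with_ic.edge = &new_stub;  // the IC is patched to the new stub
  RecordPatch(&code_with_ic, &new_stub);
  std::printf("stub color: %d (1 == GREY, will be scanned)\n",
              new_stub.color);
  return 0;
}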
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 0f76a9a..2c6d55b 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -100,7 +100,11 @@
     PrintF("]\n");
   }
 }
-#endif
+#endif  // DEBUG
+
+
+#define TRACE_IC(type, name, old_state, new_target)             \
+  ASSERT((TraceIC(type, name, old_state, new_target), true))
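
TRACE_IC wraps the tracer in ASSERT via the comma operator: the argument always evaluates to true, so debug builds run the tracer and release builds compile the call away with the rest of the assert. A standalone demo of the idiom:

#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define ASSERT(condition) ((condition) ? (void)0 : std::abort())
#else
#define ASSERT(condition) ((void)0)
#endif

static void TraceIC(const char* type, const char* name) {
  std::printf("[%s patched for %s]\n", type, name);
}

// The comma operator makes the whole argument evaluate to true, so the
// assert always passes; release builds discard the expression entirely.
#define TRACE_IC(type, name) ASSERT((TraceIC(type, name), true))

int main() {
  TRACE_IC("CallIC", "foo");  // prints only when built with -DDEBUG
  return 0;
}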
 
 
 IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
@@ -167,7 +171,7 @@
                                              LookupResult* lookup,
                                              Object* receiver) {
   Object* end = lookup->IsProperty()
-      ? lookup->holder() : isolate->heap()->null_value();
+      ? lookup->holder() : Object::cast(isolate->heap()->null_value());
   for (Object* current = receiver;
        current != end;
        current = current->GetPrototype()) {
@@ -368,15 +372,13 @@
 }
 
 
-static void LookupForRead(Object* object,
-                          String* name,
+static void LookupForRead(Handle<Object> object,
+                          Handle<String> name,
                           LookupResult* lookup) {
-  AssertNoAllocation no_gc;  // pointers must stay valid
-
   // Skip all the objects with named interceptors, but
   // without actual getter.
   while (true) {
-    object->Lookup(name, lookup);
+    object->Lookup(*name, lookup);
     // Besides normal conditions (property not found or it's not
     // an interceptor), bail out if lookup is not cacheable: we won't
     // be able to IC it anyway and regular lookup should work fine.
@@ -386,18 +388,18 @@
       return;
     }
 
-    JSObject* holder = lookup->holder();
-    if (HasInterceptorGetter(holder)) {
+    Handle<JSObject> holder(lookup->holder());
+    if (HasInterceptorGetter(*holder)) {
       return;
     }
 
-    holder->LocalLookupRealNamedProperty(name, lookup);
+    holder->LocalLookupRealNamedProperty(*name, lookup);
     if (lookup->IsProperty()) {
       ASSERT(lookup->type() != INTERCEPTOR);
       return;
     }
 
-    Object* proto = holder->GetPrototype();
+    Handle<Object> proto(holder->GetPrototype());
     if (proto->IsNull()) {
       lookup->NotFound();
       return;
@@ -408,31 +410,32 @@
 }
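
Handlifying LookupForRead is what lets the old AssertNoAllocation guard disappear: a handle is a slot the garbage collector updates, so it survives allocation, while the raw pointers it replaces would be left dangling by a moving collection. A minimal model of the difference, with invented types:

#include <cstdio>

struct Object { int value; };

static Object* root_slot = NULL;  // a slot the "GC" knows how to update

struct ObjectHandle {  // invented stand-in for Handle<Object>
  Object** location;
  Object* operator->() const { return *location; }
};

// A moving "collection": relocates the object and fixes up known slots.
static void MoveObject(Object* from, Object* to) {
  *to = *from;
  from->value = -1;  // poison the stale copy
  root_slot = to;    // the GC rewrites its slot...
}                    // ...but nothing rewrites raw C++ pointers

int main() {
  Object a = { 7 };
  Object b = { 0 };
  root_slot = &a;
  Object* raw = root_slot;               // what LookupForRead used to hold
  ObjectHandle handle = { &root_slot };  // what it holds now
  MoveObject(&a, &b);
  std::printf("raw: %d, handle: %d\n", raw->value, handle->value);  // -1, 7
  return 0;
}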
 
 
-Object* CallICBase::TryCallAsFunction(Object* object) {
-  HandleScope scope(isolate());
-  Handle<Object> target(object, isolate());
-  Handle<Object> delegate = Execution::GetFunctionDelegate(target);
+Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
+  Handle<Object> delegate = Execution::GetFunctionDelegate(object);
 
-  if (delegate->IsJSFunction()) {
+  if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
     // Patch the receiver and use the delegate as the function to
-    // invoke. This is used for invoking objects as if they were
-    // functions.
-    const int argc = this->target()->arguments_count();
+    // invoke. This is used for invoking objects as if they were functions.
+    const int argc = target()->arguments_count();
     StackFrameLocator locator;
     JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
     int index = frame->ComputeExpressionsCount() - (argc + 1);
-    frame->SetExpression(index, *target);
+    frame->SetExpression(index, *object);
   }
 
-  return *delegate;
+  return delegate;
 }
 
 
 void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
                                             Handle<Object> object) {
+  while (callee->IsJSFunctionProxy()) {
+    callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap());
+  }
+
   if (callee->IsJSFunction()) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
-    if (function->shared()->strict_mode() || function->IsBuiltin()) {
+    if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
       // Do not wrap receiver for strict mode functions or for builtins.
       return;
     }
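
TryCallAsFunction and ReceiverToObjectIfRequired now understand
JSFunctionProxy: the callee is unwrapped through call_trap() until an
ordinary function emerges, and only classic-mode, non-builtin callees get
their receiver wrapped. The unwrap loop in miniature (hypothetical struct,
not V8's proxy representation):

#include <cstdio>

struct Callable {
  bool is_proxy;
  Callable* call_trap;  // meaningful only when is_proxy is true
  bool classic_mode;    // i.e. not strict mode, checked once unwrapped
};

// Mirrors the loop added to ReceiverToObjectIfRequired: follow call traps
// until the underlying function is reached.
static Callable* Unwrap(Callable* callee) {
  while (callee->is_proxy) callee = callee->call_trap;
  return callee;
}

int main() {
  Callable fn     = { false, 0, true };
  Callable proxy  = { true, &fn, false };
  Callable proxy2 = { true, &proxy, false };
  std::printf("classic mode after unwrap: %d\n",
              Unwrap(&proxy2)->classic_mode);
  return 0;
}
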
@@ -464,31 +467,27 @@
   // the element if so.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    Object* result;
-    { MaybeObject* maybe_result = object->GetElement(index);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-
-    if (result->IsJSFunction()) return result;
+    Handle<Object> result = Object::GetElement(object, index);
+    RETURN_IF_EMPTY_HANDLE(isolate(), result);
+    if (result->IsJSFunction()) return *result;
 
     // Try to find a suitable function delegate for the object at hand.
     result = TryCallAsFunction(result);
-    if (result->IsJSFunction()) return result;
+    if (result->IsJSFunction()) return *result;
 
     // Otherwise, it will fail in the lookup step.
   }
 
   // Lookup the property in the object.
-  LookupResult lookup;
-  LookupForRead(*object, *name, &lookup);
+  LookupResult lookup(isolate());
+  LookupForRead(object, name, &lookup);
 
   if (!lookup.IsProperty()) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    if (IsContextual(object)) {
-      return ReferenceError("not_defined", name);
-    }
-    return TypeError("undefined_method", object, name);
+    return IsContextual(object)
+        ? ReferenceError("not_defined", name)
+        : TypeError("undefined_method", object, name);
   }
 
   // Lookup is valid: Update inline cache and stub cache.
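
RETURN_IF_EMPTY_HANDLE, used above, is this rewrite's replacement for the
MaybeObject*/ToObject dance: a callee that throws returns an empty handle,
and the macro propagates the failure to the caller. A self-contained sketch
of the convention (hypothetical macro and types; bool stands in for V8's
failure return):

#include <cstdio>

template <typename T>
class Handle {
 public:
  Handle() : ptr_(0) {}                  // empty handle: exception pending
  explicit Handle(T* ptr) : ptr_(ptr) {}
  bool is_null() const { return ptr_ == 0; }
  T* operator->() const { return ptr_; }
 private:
  T* ptr_;
};

struct Object { int value; };

// Bail out of the enclosing function when a callee signalled an exception
// by returning an empty handle.
#define RETURN_IF_EMPTY_HANDLE(handle) \
  do { if ((handle).is_null()) return false; } while (false)

static Handle<Object> GetElement(bool throws, Object* obj) {
  return throws ? Handle<Object>() : Handle<Object>(obj);
}

static bool LoadSomething(bool throws) {
  Object obj = {42};
  Handle<Object> result = GetElement(throws, &obj);
  RETURN_IF_EMPTY_HANDLE(result);        // propagate the pending exception
  std::printf("loaded %d\n", result->value);
  return true;
}

int main() {
  LoadSomething(false);  // prints
  LoadSomething(true);   // bails out silently
  return 0;
}
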
@@ -498,53 +497,42 @@
 
   // Get the property.
   PropertyAttributes attr;
-  Object* result;
-  { MaybeObject* maybe_result =
-        object->GetProperty(*object, &lookup, *name, &attr);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Object> result =
+      Object::GetProperty(object, object, &lookup, name, &attr);
+  RETURN_IF_EMPTY_HANDLE(isolate(), result);
 
-  if (lookup.type() == INTERCEPTOR) {
+  if (lookup.type() == INTERCEPTOR && attr == ABSENT) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    if (attr == ABSENT) {
-      if (IsContextual(object)) {
-        return ReferenceError("not_defined", name);
-      }
-      return TypeError("undefined_method", object, name);
-    }
+    return IsContextual(object)
+        ? ReferenceError("not_defined", name)
+        : TypeError("undefined_method", object, name);
   }
 
   ASSERT(!result->IsTheHole());
 
-  HandleScope scope(isolate());
-  // Wrap result in a handle because ReceiverToObjectIfRequired may allocate
-  // new object and cause GC.
-  Handle<Object> result_handle(result);
   // Make receiver an object if the callee requires it. Strict mode or builtin
   // functions do not wrap the receiver, non-strict functions and objects
   // called as functions do.
-  ReceiverToObjectIfRequired(result_handle, object);
+  ReceiverToObjectIfRequired(result, object);
 
-  if (result_handle->IsJSFunction()) {
+  if (result->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(result);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Handle stepping into a function if step into is active.
     Debug* debug = isolate()->debug();
     if (debug->StepInActive()) {
       // Protect the result in a handle as the debugger can allocate and might
       // cause GC.
-      Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
       debug->HandleStepIn(function, object, fp(), false);
-      return *function;
     }
 #endif
-
-    return *result_handle;
+    return *function;
   }
 
   // Try to find a suitable function delegate for the object at hand.
-  result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
-  if (result_handle->IsJSFunction()) return *result_handle;
+  result = TryCallAsFunction(result);
+  if (result->IsJSFunction()) return *result;
 
   return TypeError("property_not_function", object, name);
 }
@@ -594,89 +582,57 @@
 }
 
 
-MaybeObject* CallICBase::ComputeMonomorphicStub(
-    LookupResult* lookup,
-    State state,
-    Code::ExtraICState extra_ic_state,
-    Handle<Object> object,
-    Handle<String> name) {
+Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
+                                                State state,
+                                                Code::ExtraICState extra_state,
+                                                Handle<Object> object,
+                                                Handle<String> name) {
   int argc = target()->arguments_count();
-  MaybeObject* maybe_code = NULL;
+  Handle<JSObject> holder(lookup->holder());
   switch (lookup->type()) {
     case FIELD: {
       int index = lookup->GetFieldIndex();
-      maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
-                                                             kind_,
-                                                             extra_ic_state,
-                                                             *name,
-                                                             *object,
-                                                             lookup->holder(),
-                                                             index);
-      break;
+      return isolate()->stub_cache()->ComputeCallField(
+          argc, kind_, extra_state, name, object, holder, index);
     }
     case CONSTANT_FUNCTION: {
       // Get the constant function and compute the code stub for this
       // call; used for rewriting to monomorphic state and making sure
       // that the code stub is in the stub cache.
-      JSFunction* function = lookup->GetConstantFunction();
-      maybe_code =
-          isolate()->stub_cache()->ComputeCallConstant(argc,
-                                                       kind_,
-                                                       extra_ic_state,
-                                                       *name,
-                                                       *object,
-                                                       lookup->holder(),
-                                                       function);
-      break;
+      Handle<JSFunction> function(lookup->GetConstantFunction());
+      return isolate()->stub_cache()->ComputeCallConstant(
+          argc, kind_, extra_state, name, object, holder, function);
     }
     case NORMAL: {
-      if (!object->IsJSObject()) return NULL;
+      // If we return a null handle, the IC will not be patched.
+      if (!object->IsJSObject()) return Handle<Code>::null();
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
-      if (lookup->holder()->IsGlobalObject()) {
-        GlobalObject* global = GlobalObject::cast(lookup->holder());
-        JSGlobalPropertyCell* cell =
-            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-        if (!cell->value()->IsJSFunction()) return NULL;
-        JSFunction* function = JSFunction::cast(cell->value());
-        maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
-                                                                kind_,
-                                                                extra_ic_state,
-                                                                *name,
-                                                                *receiver,
-                                                                global,
-                                                                cell,
-                                                                function);
+      if (holder->IsGlobalObject()) {
+        Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
+        Handle<JSFunction> function(JSFunction::cast(cell->value()));
+        return isolate()->stub_cache()->ComputeCallGlobal(
+            argc, kind_, extra_state, name, receiver, global, cell, function);
       } else {
         // There is only one shared stub for calling normalized
         // properties. It does not traverse the prototype chain, so the
         // property must be found in the receiver for the stub to be
         // applicable.
-        if (lookup->holder() != *receiver) return NULL;
-        maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
-                                                                kind_,
-                                                                extra_ic_state,
-                                                                *name,
-                                                                *receiver);
+        if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
+        return isolate()->stub_cache()->ComputeCallNormal(
+            argc, kind_, extra_state);
       }
       break;
     }
-    case INTERCEPTOR: {
-      ASSERT(HasInterceptorGetter(lookup->holder()));
-      maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
-          argc,
-          kind_,
-          extra_ic_state,
-          *name,
-          *object,
-          lookup->holder());
-      break;
-    }
+    case INTERCEPTOR:
+      ASSERT(HasInterceptorGetter(*holder));
+      return isolate()->stub_cache()->ComputeCallInterceptor(
+          argc, kind_, extra_state, name, object, holder);
     default:
-      maybe_code = NULL;
-      break;
+      return Handle<Code>::null();
   }
-  return maybe_code;
 }
 
 
@@ -698,75 +654,57 @@
 
   // Compute the number of arguments.
   int argc = target()->arguments_count();
-  MaybeObject* maybe_code = NULL;
   bool had_proto_failure = false;
+  Handle<Code> code;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code =
-        isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
-                                                           kind_,
-                                                           extra_ic_state);
+    code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
+        argc, kind_, extra_ic_state);
   } else if (state == MONOMORPHIC) {
     if (kind_ == Code::CALL_IC &&
         TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
-      maybe_code = ComputeMonomorphicStub(lookup,
-                                          state,
-                                          extra_ic_state,
-                                          object,
-                                          name);
+      code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+                                    object, name);
     } else if (kind_ == Code::CALL_IC &&
                TryRemoveInvalidPrototypeDependentStub(target(),
                                                       *object,
                                                       *name)) {
       had_proto_failure = true;
-      maybe_code = ComputeMonomorphicStub(lookup,
-                                          state,
-                                          extra_ic_state,
-                                          object,
-                                          name);
+      code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+                                    object, name);
     } else {
-      maybe_code =
-          isolate()->stub_cache()->ComputeCallMegamorphic(argc,
-                                                          kind_,
-                                                          extra_ic_state);
+      code = isolate()->stub_cache()->ComputeCallMegamorphic(
+          argc, kind_, extra_ic_state);
     }
   } else {
-    maybe_code = ComputeMonomorphicStub(lookup,
-                                        state,
-                                        extra_ic_state,
-                                        object,
-                                        name);
+    code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
+                                  object, name);
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  Object* code;
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+  // If there's no appropriate stub we simply avoid updating the caches.
+  if (code.is_null()) return;
 
   // Patch the call site depending on the state of the cache.
   if (state == UNINITIALIZED ||
       state == PREMONOMORPHIC ||
       state == MONOMORPHIC ||
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MEGAMORPHIC) {
     // Cache code holding map should be consistent with
     // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
-    Map* map = JSObject::cast(object->IsJSObject() ? *object :
-                              object->GetPrototype())->map();
-
+    Handle<JSObject> cache_object = object->IsJSObject()
+        ? Handle<JSObject>::cast(object)
+        : Handle<JSObject>(JSObject::cast(object->GetPrototype()));
     // Update the stub cache.
-    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
+    isolate()->stub_cache()->Set(*name, cache_object->map(), *code);
   }
 
-  USE(had_proto_failure);
-#ifdef DEBUG
   if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
-  TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
-          name, state, target());
-#endif
+  TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
+           name, state, target());
 }
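
The patching logic above is easier to follow as the state machine it
implements: early states patch the call site directly, while a megamorphic
site stops being repatched and instead feeds the shared stub cache keyed by
(map, name). Reduced to a sketch (simplified; the real code also tracks
MONOMORPHIC_PROTOTYPE_FAILURE and a premonomorphic delay stub):

#include <cstdio>

// The states an inline cache moves through as it observes receivers.
enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

// One miss: early states repatch the call site; a megamorphic site instead
// relies on the shared stub cache keyed by (map, name).
static State OnMiss(State state, bool same_map_as_before) {
  switch (state) {
    case UNINITIALIZED:  return PREMONOMORPHIC;  // delay specialization
    case PREMONOMORPHIC: return MONOMORPHIC;     // specialize on one map
    case MONOMORPHIC:
      return same_map_as_before ? MONOMORPHIC : MEGAMORPHIC;
    case MEGAMORPHIC:    return MEGAMORPHIC;     // terminal for this sketch
  }
  return state;
}

int main() {
  State s = UNINITIALIZED;
  s = OnMiss(s, true);   // -> PREMONOMORPHIC
  s = OnMiss(s, true);   // -> MONOMORPHIC
  s = OnMiss(s, false);  // unseen map -> MEGAMORPHIC
  std::printf("final state: %d\n", (int)s);
  return 0;
}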
 
 
@@ -786,34 +724,22 @@
 
   if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
     int argc = target()->arguments_count();
-    Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
-    Map* map = heap->non_strict_arguments_elements_map();
+    Handle<Map> map =
+        isolate()->factory()->non_strict_arguments_elements_map();
     if (object->IsJSObject() &&
-        Handle<JSObject>::cast(object)->elements()->map() == map) {
-      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
+        Handle<JSObject>::cast(object)->elements()->map() == *map) {
+      Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments(
           argc, Code::KEYED_CALL_IC);
-      Object* code;
-      if (maybe_code->ToObject(&code)) {
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedCallIC", key, state, target());
-#endif
-      }
-    } else if (FLAG_use_ic && state != MEGAMORPHIC &&
-               !object->IsAccessCheckNeeded()) {
-      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
+      set_target(*code);
+      TRACE_IC("KeyedCallIC", key, state, target());
+    } else if (!object->IsAccessCheckNeeded()) {
+      Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic(
           argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
-      Object* code;
-      if (maybe_code->ToObject(&code)) {
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedCallIC", key, state, target());
-#endif
-      }
+      set_target(*code);
+      TRACE_IC("KeyedCallIC", key, state, target());
     }
   }
 
-  HandleScope scope(isolate());
   Handle<Object> result = GetProperty(object, key);
   RETURN_IF_EMPTY_HANDLE(isolate(), result);
 
@@ -821,9 +747,9 @@
   // functions do not wrap the receiver, non-strict functions and objects
   // called as functions do.
   ReceiverToObjectIfRequired(result, object);
-
   if (result->IsJSFunction()) return *result;
-  result = Handle<Object>(TryCallAsFunction(*result));
+
+  result = TryCallAsFunction(result);
   if (result->IsJSFunction()) return *result;
 
   return TypeError("property_not_function", object, key);
@@ -846,53 +772,44 @@
     // the underlying string value.  See ECMA-262 15.5.5.1.
     if ((object->IsString() || object->IsStringWrapper()) &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      AssertNoAllocation no_allocation;
-      Code* stub = NULL;
+      Handle<Code> stub;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
       } else if (state == PREMONOMORPHIC) {
-        if (object->IsString()) {
-          stub = isolate()->builtins()->builtin(
-              Builtins::kLoadIC_StringLength);
-        } else {
-          stub = isolate()->builtins()->builtin(
-              Builtins::kLoadIC_StringWrapperLength);
-        }
+        stub = object->IsString()
+            ? isolate()->builtins()->LoadIC_StringLength()
+            : isolate()->builtins()->LoadIC_StringWrapperLength();
       } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
-        stub = isolate()->builtins()->builtin(
-            Builtins::kLoadIC_StringWrapperLength);
+        stub = isolate()->builtins()->LoadIC_StringWrapperLength();
       } else if (state != MEGAMORPHIC) {
         stub = megamorphic_stub();
       }
-      if (stub != NULL) {
-        set_target(stub);
+      if (!stub.is_null()) {
+        set_target(*stub);
 #ifdef DEBUG
         if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
 #endif
       }
       // Get the string if we have a string wrapper object.
-      if (object->IsJSValue()) {
-        return Smi::FromInt(
-            String::cast(Handle<JSValue>::cast(object)->value())->length());
-      }
-      return Smi::FromInt(String::cast(*object)->length());
+      Handle<Object> string = object->IsJSValue()
+          ? Handle<Object>(Handle<JSValue>::cast(object)->value())
+          : object;
+      return Smi::FromInt(String::cast(*string)->length());
     }
 
     // Use specialized code for getting the length of arrays.
     if (object->IsJSArray() &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      AssertNoAllocation no_allocation;
-      Code* stub = NULL;
+      Handle<Code> stub;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
       } else if (state == PREMONOMORPHIC) {
-        stub = isolate()->builtins()->builtin(
-            Builtins::kLoadIC_ArrayLength);
+        stub = isolate()->builtins()->LoadIC_ArrayLength();
       } else if (state != MEGAMORPHIC) {
         stub = megamorphic_stub();
       }
-      if (stub != NULL) {
-        set_target(stub);
+      if (!stub.is_null()) {
+        set_target(*stub);
 #ifdef DEBUG
         if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
@@ -903,23 +820,20 @@
     // Use specialized code for getting prototype of functions.
     if (object->IsJSFunction() &&
         name->Equals(isolate()->heap()->prototype_symbol()) &&
-        JSFunction::cast(*object)->should_have_prototype()) {
-      { AssertNoAllocation no_allocation;
-        Code* stub = NULL;
-        if (state == UNINITIALIZED) {
-          stub = pre_monomorphic_stub();
-        } else if (state == PREMONOMORPHIC) {
-          stub = isolate()->builtins()->builtin(
-              Builtins::kLoadIC_FunctionPrototype);
-        } else if (state != MEGAMORPHIC) {
-          stub = megamorphic_stub();
-        }
-        if (stub != NULL) {
-          set_target(stub);
+        Handle<JSFunction>::cast(object)->should_have_prototype()) {
+      Handle<Code> stub;
+      if (state == UNINITIALIZED) {
+        stub = pre_monomorphic_stub();
+      } else if (state == PREMONOMORPHIC) {
+        stub = isolate()->builtins()->LoadIC_FunctionPrototype();
+      } else if (state != MEGAMORPHIC) {
+        stub = megamorphic_stub();
+      }
+      if (!stub.is_null()) {
+        set_target(*stub);
 #ifdef DEBUG
-          if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+        if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
 #endif
-        }
       }
       return Accessors::FunctionGetPrototype(*object, 0);
     }
@@ -931,8 +845,8 @@
   if (name->AsArrayIndex(&index)) return object->GetElement(index);
 
   // Named lookup in the object.
-  LookupResult lookup;
-  LookupForRead(*object, *name, &lookup);
+  LookupResult lookup(isolate());
+  LookupForRead(object, name, &lookup);
 
   // If we did not find a property, check if we need to throw an exception.
   if (!lookup.IsProperty()) {
@@ -951,17 +865,15 @@
   if (lookup.IsProperty() &&
       (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
     // Get the property.
-    Object* result;
-    { MaybeObject* maybe_result =
-          object->GetProperty(*object, &lookup, *name, &attr);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
+    Handle<Object> result =
+        Object::GetProperty(object, object, &lookup, name, &attr);
+    RETURN_IF_EMPTY_HANDLE(isolate(), result);
     // If the property is not present, check if we need to throw an
     // exception.
     if (attr == ABSENT && IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
-    return result;
+    return *result;
   }
 
   // Get the property.
@@ -984,120 +896,105 @@
   if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
 
   // Compute the code stub for this load.
-  MaybeObject* maybe_code = NULL;
-  Object* code;
+  Handle<Code> code;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code = pre_monomorphic_stub();
+    code = pre_monomorphic_stub();
   } else if (!lookup->IsProperty()) {
     // Nonexistent property. The result is undefined.
-    maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
-                                                                 *receiver);
+    code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
   } else {
     // Compute monomorphic stub.
+    Handle<JSObject> holder(lookup->holder());
     switch (lookup->type()) {
-      case FIELD: {
-        maybe_code = isolate()->stub_cache()->ComputeLoadField(
-            *name,
-            *receiver,
-            lookup->holder(),
-            lookup->GetFieldIndex());
+      case FIELD:
+        code = isolate()->stub_cache()->ComputeLoadField(
+            name, receiver, holder, lookup->GetFieldIndex());
         break;
-      }
       case CONSTANT_FUNCTION: {
-        Object* constant = lookup->GetConstantFunction();
-        maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
-            *name, *receiver, lookup->holder(), constant);
+        Handle<Object> constant(lookup->GetConstantFunction());
+        code = isolate()->stub_cache()->ComputeLoadConstant(
+            name, receiver, holder, constant);
         break;
       }
-      case NORMAL: {
-        if (lookup->holder()->IsGlobalObject()) {
-          GlobalObject* global = GlobalObject::cast(lookup->holder());
-          JSGlobalPropertyCell* cell =
-              JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-          maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
-                                                    *receiver,
-                                                    global,
-                                                    cell,
-                                                    lookup->IsDontDelete());
+      case NORMAL:
+        if (holder->IsGlobalObject()) {
+          Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+          Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+          code = isolate()->stub_cache()->ComputeLoadGlobal(
+              name, receiver, global, cell, lookup->IsDontDelete());
         } else {
           // There is only one shared stub for loading normalized
           // properties. It does not traverse the prototype chain, so the
           // property must be found in the receiver for the stub to be
           // applicable.
-          if (lookup->holder() != *receiver) return;
-          maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
+          if (!holder.is_identical_to(receiver)) return;
+          code = isolate()->stub_cache()->ComputeLoadNormal();
         }
         break;
-      }
       case CALLBACKS: {
-        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
-        AccessorInfo* callback =
-            AccessorInfo::cast(lookup->GetCallbackObject());
+        Handle<Object> callback_object(lookup->GetCallbackObject());
+        if (!callback_object->IsAccessorInfo()) return;
+        Handle<AccessorInfo> callback =
+            Handle<AccessorInfo>::cast(callback_object);
         if (v8::ToCData<Address>(callback->getter()) == 0) return;
-        maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
-            *name, *receiver, lookup->holder(), callback);
+        code = isolate()->stub_cache()->ComputeLoadCallback(
+            name, receiver, holder, callback);
         break;
       }
-      case INTERCEPTOR: {
-        ASSERT(HasInterceptorGetter(lookup->holder()));
-        maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
-            *name, *receiver, lookup->holder());
+      case INTERCEPTOR:
+        ASSERT(HasInterceptorGetter(*holder));
+        code = isolate()->stub_cache()->ComputeLoadInterceptor(
+            name, receiver, holder);
         break;
-      }
       default:
         return;
     }
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
   // Patch the call site depending on the state of the cache.
-  if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+  if (state == UNINITIALIZED ||
+      state == PREMONOMORPHIC ||
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
-    set_target(megamorphic_stub());
+    set_target(*megamorphic_stub());
   } else if (state == MEGAMORPHIC) {
     // Cache code holding map should be consistent with
     // GenerateMonomorphicCacheProbe.
-    Map* map = JSObject::cast(object->IsJSObject() ? *object :
-                              object->GetPrototype())->map();
-
-    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
+    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
   }
 
-#ifdef DEBUG
-  TraceIC("LoadIC", name, state, target());
-#endif
+  TRACE_IC("LoadIC", name, state, target());
 }
 
 
-MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
+Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
     bool is_js_array,
     ElementsKind elements_kind) {
-  return KeyedLoadElementStub(elements_kind).TryGetCode();
+  return KeyedLoadElementStub(elements_kind).GetCode();
 }
 
 
-MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
-    MapList* receiver_maps,
-    CodeList* targets,
+Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
+    MapHandleList* receiver_maps,
     StrictModeFlag strict_mode) {
-  Object* object;
-  KeyedLoadStubCompiler compiler;
-  MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
-                                                            targets);
-  if (!maybe_code->ToObject(&object)) return maybe_code;
+  CodeHandleList handler_ics(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map = receiver_maps->at(i);
+    Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+        receiver_map, strict_mode);
+    handler_ics.Add(cached_stub);
+  }
+  KeyedLoadStubCompiler compiler(isolate());
+  Handle<Code> code = compiler.CompileLoadPolymorphic(
+      receiver_maps, &handler_ics);
   isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
-  PROFILE(isolate(), CodeCreateEvent(
-      Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
-      Code::cast(object), 0));
-  return object;
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0));
+  return code;
 }
 
 
@@ -1107,9 +1004,8 @@
                                bool force_generic_stub) {
   // Check for values that can be converted into a symbol.
   // TODO(1295): Remove this code.
-  HandleScope scope(isolate());
   if (key->IsHeapNumber() &&
-      isnan(HeapNumber::cast(*key)->value())) {
+      isnan(Handle<HeapNumber>::cast(key)->value())) {
     key = isolate()->factory()->nan_symbol();
   } else if (key->IsUndefined()) {
     key = isolate()->factory()->undefined_symbol();
@@ -1131,16 +1027,11 @@
       if (object->IsString() &&
           name->Equals(isolate()->heap()->length_symbol())) {
         Handle<String> string = Handle<String>::cast(object);
-        Object* code = NULL;
-        { MaybeObject* maybe_code =
-              isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
-                                                                    *string);
-          if (!maybe_code->ToObject(&code)) return maybe_code;
-        }
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedLoadIC", name, state, target());
-#endif  // DEBUG
+        Handle<Code> code =
+            isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string);
+        ASSERT(!code.is_null());
+        set_target(*code);
+        TRACE_IC("KeyedLoadIC", name, state, target());
         return Smi::FromInt(string->length());
       }
 
@@ -1148,34 +1039,25 @@
       if (object->IsJSArray() &&
           name->Equals(isolate()->heap()->length_symbol())) {
         Handle<JSArray> array = Handle<JSArray>::cast(object);
-        Object* code;
-        { MaybeObject* maybe_code =
-              isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
-                                                                   *array);
-          if (!maybe_code->ToObject(&code)) return maybe_code;
-        }
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedLoadIC", name, state, target());
-#endif  // DEBUG
-        return JSArray::cast(*object)->length();
+        Handle<Code> code =
+            isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array);
+        ASSERT(!code.is_null());
+        set_target(*code);
+        TRACE_IC("KeyedLoadIC", name, state, target());
+        return array->length();
       }
 
       // Use specialized code for getting prototype of functions.
       if (object->IsJSFunction() &&
           name->Equals(isolate()->heap()->prototype_symbol()) &&
-        JSFunction::cast(*object)->should_have_prototype()) {
+          Handle<JSFunction>::cast(object)->should_have_prototype()) {
         Handle<JSFunction> function = Handle<JSFunction>::cast(object);
-        Object* code;
-        { MaybeObject* maybe_code =
-              isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
-                  *name, *function);
-          if (!maybe_code->ToObject(&code)) return maybe_code;
-        }
-        set_target(Code::cast(code));
-#ifdef DEBUG
-        TraceIC("KeyedLoadIC", name, state, target());
-#endif  // DEBUG
+        Handle<Code> code =
+            isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
+                name, function);
+        ASSERT(!code.is_null());
+        set_target(*code);
+        TRACE_IC("KeyedLoadIC", name, state, target());
         return Accessors::FunctionGetPrototype(*object, 0);
       }
     }
@@ -1184,15 +1066,14 @@
     // the element or char if so.
     uint32_t index = 0;
     if (name->AsArrayIndex(&index)) {
-      HandleScope scope(isolate());
       // Rewrite to the generic keyed load stub.
-      if (FLAG_use_ic) set_target(generic_stub());
+      if (FLAG_use_ic) set_target(*generic_stub());
       return Runtime::GetElementOrCharAt(isolate(), object, index);
     }
 
     // Named lookup.
-    LookupResult lookup;
-    LookupForRead(*object, *name, &lookup);
+    LookupResult lookup(isolate());
+    LookupForRead(object, name, &lookup);
 
     // If we did not find a property, check if we need to throw an exception.
     if (!lookup.IsProperty() && IsContextual(object)) {
@@ -1206,17 +1087,15 @@
     PropertyAttributes attr;
     if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
       // Get the property.
-      Object* result;
-      { MaybeObject* maybe_result =
-            object->GetProperty(*object, &lookup, *name, &attr);
-        if (!maybe_result->ToObject(&result)) return maybe_result;
-      }
+      Handle<Object> result =
+          Object::GetProperty(object, object, &lookup, name, &attr);
+      RETURN_IF_EMPTY_HANDLE(isolate(), result);
       // If the property is not present, check if we need to throw an
       // exception.
       if (attr == ABSENT && IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
-      return result;
+      return *result;
     }
 
     return object->GetProperty(*object, &lookup, *name, &attr);
@@ -1227,44 +1106,38 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
 
   if (use_ic) {
-    Code* stub = generic_stub();
+    Handle<Code> stub = generic_stub();
     if (!force_generic_stub) {
       if (object->IsString() && key->IsNumber()) {
         if (state == UNINITIALIZED) {
           stub = string_stub();
         }
       } else if (object->IsJSObject()) {
-        JSObject* receiver = JSObject::cast(*object);
-        Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
-        Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
-        if (elements_map == heap->non_strict_arguments_elements_map()) {
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+        if (receiver->elements()->map() ==
+            isolate()->heap()->non_strict_arguments_elements_map()) {
           stub = non_strict_arguments_stub();
         } else if (receiver->HasIndexedInterceptor()) {
           stub = indexed_interceptor_stub();
-        } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
-          MaybeObject* maybe_stub = ComputeStub(receiver,
-                                                false,
-                                                kNonStrictMode,
-                                                stub);
-          stub = maybe_stub->IsFailure() ?
-              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+        } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+          stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
         }
       }
     }
-    if (stub != NULL) set_target(stub);
+    if (!stub.is_null()) set_target(*stub);
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedLoadIC", key, state, target());
-#endif  // DEBUG
+  TRACE_IC("KeyedLoadIC", key, state, target());
 
   // Get the property.
   return Runtime::GetObjectProperty(isolate(), object, key);
 }
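
The stub selection in KeyedLoadIC::Load, now written over handles, still
walks the same chain of special cases from most to least specific. Roughly
(a simplification; the real checks also consult the IC state and the key
type):

#include <cstdio>

// The chain of special cases, most specific first; each receiver shape gets
// the best stub available before falling back to the generic one.
static const char* SelectKeyedLoadStub(bool string_receiver_number_key,
                                       bool has_arguments_elements,
                                       bool has_indexed_interceptor,
                                       bool key_is_smi) {
  if (string_receiver_number_key) return "string_stub";
  if (has_arguments_elements)     return "non_strict_arguments_stub";
  if (has_indexed_interceptor)    return "indexed_interceptor_stub";
  if (key_is_smi)                 return "stub from ComputeStub";
  return "generic_stub";
}

int main() {
  std::printf("%s\n", SelectKeyedLoadStub(false, false, true, true));
  return 0;
}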
 
 
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
-                               Handle<Object> object, Handle<String> name) {
+void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
+                               State state,
+                               Handle<Object> object,
+                               Handle<String> name) {
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
@@ -1274,68 +1147,60 @@
   if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
 
   // Compute the code stub for this load.
-  MaybeObject* maybe_code = NULL;
-  Object* code;
+  Handle<Code> code;
 
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code = pre_monomorphic_stub();
+    code = pre_monomorphic_stub();
   } else {
     // Compute a monomorphic stub.
+    Handle<JSObject> holder(lookup->holder());
     switch (lookup->type()) {
-      case FIELD: {
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
-            *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
+      case FIELD:
+        code = isolate()->stub_cache()->ComputeKeyedLoadField(
+            name, receiver, holder, lookup->GetFieldIndex());
         break;
-      }
       case CONSTANT_FUNCTION: {
-        Object* constant = lookup->GetConstantFunction();
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
-            *name, *receiver, lookup->holder(), constant);
+        Handle<Object> constant(lookup->GetConstantFunction());
+        code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
+            name, receiver, holder, constant);
         break;
       }
       case CALLBACKS: {
-        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
-        AccessorInfo* callback =
-            AccessorInfo::cast(lookup->GetCallbackObject());
+        Handle<Object> callback_object(lookup->GetCallbackObject());
+        if (!callback_object->IsAccessorInfo()) return;
+        Handle<AccessorInfo> callback =
+            Handle<AccessorInfo>::cast(callback_object);
         if (v8::ToCData<Address>(callback->getter()) == 0) return;
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
-            *name, *receiver, lookup->holder(), callback);
+        code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
+            name, receiver, holder, callback);
         break;
       }
-      case INTERCEPTOR: {
+      case INTERCEPTOR:
         ASSERT(HasInterceptorGetter(lookup->holder()));
-        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
-            *name, *receiver, lookup->holder());
+        code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+            name, receiver, holder);
         break;
-      }
-      default: {
+      default:
         // Always rewrite to the generic case so that we do not
         // repeatedly try to rewrite.
-        maybe_code = generic_stub();
+        code = generic_stub();
         break;
-      }
     }
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
   // Patch the call site depending on the state of the cache.  Make
   // sure to always rewrite from monomorphic to megamorphic.
   ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
   if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
-    set_target(megamorphic_stub());
+    set_target(*megamorphic_stub());
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedLoadIC", name, state, target());
-#endif
+  TRACE_IC("KeyedLoadIC", name, state, target());
 }
 
 
@@ -1351,20 +1216,18 @@
 }
 
 
-static bool LookupForWrite(JSReceiver* receiver,
-                           String* name,
+static bool LookupForWrite(Handle<JSObject> receiver,
+                           Handle<String> name,
                            LookupResult* lookup) {
-  receiver->LocalLookup(name, lookup);
+  receiver->LocalLookup(*name, lookup);
   if (!StoreICableLookup(lookup)) {
     return false;
   }
 
-  if (lookup->type() == INTERCEPTOR) {
-    JSObject* object = JSObject::cast(receiver);
-    if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
-      object->LocalLookupRealNamedProperty(name, lookup);
-      return StoreICableLookup(lookup);
-    }
+  if (lookup->type() == INTERCEPTOR &&
+      receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+    receiver->LocalLookupRealNamedProperty(*name, lookup);
+    return StoreICableLookup(lookup);
   }
 
   return true;
@@ -1376,58 +1239,58 @@
                             Handle<Object> object,
                             Handle<String> name,
                             Handle<Object> value) {
-  // If the object is undefined or null it's illegal to try to set any
-  // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
-    return TypeError("non_object_property_store", object, name);
-  }
+  if (!object->IsJSObject()) {
+    // Handle proxies.
+    if (object->IsJSProxy()) {
+      return JSProxy::cast(*object)->
+          SetProperty(*name, *value, NONE, strict_mode);
+    }
 
-  if (!object->IsJSReceiver()) {
+    // If the object is undefined or null it's illegal to try to set any
+    // properties on it; throw a TypeError in that case.
+    if (object->IsUndefined() || object->IsNull()) {
+      return TypeError("non_object_property_store", object, name);
+    }
+
     // The length property of string values is read-only. Throw in strict mode.
     if (strict_mode == kStrictMode && object->IsString() &&
         name->Equals(isolate()->heap()->length_symbol())) {
       return TypeError("strict_read_only_property", object, name);
     }
-    // Ignore stores where the receiver is not a JSObject.
+    // Ignore other stores where the receiver is not a JSObject.
+    // TODO(1475): Must check prototype chains of object wrappers.
     return *value;
   }
 
-  // Handle proxies.
-  if (object->IsJSProxy()) {
-    return JSReceiver::cast(*object)->
-        SetProperty(*name, *value, NONE, strict_mode);
-  }
-
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
   // Check if the given name is an array index.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    HandleScope scope(isolate());
     Handle<Object> result = SetElement(receiver, index, value, strict_mode);
-    if (result.is_null()) return Failure::Exception();
+    RETURN_IF_EMPTY_HANDLE(isolate(), result);
     return *value;
   }
 
   // Use specialized code for setting the length of arrays.
   if (receiver->IsJSArray()
       && name->Equals(isolate()->heap()->length_symbol())
-      && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
+      && Handle<JSArray>::cast(receiver)->AllowsSetElementsLength()) {
 #ifdef DEBUG
     if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
 #endif
-    Builtins::Name target = (strict_mode == kStrictMode)
-        ? Builtins::kStoreIC_ArrayLength_Strict
-        : Builtins::kStoreIC_ArrayLength;
-    set_target(isolate()->builtins()->builtin(target));
+    Handle<Code> stub = (strict_mode == kStrictMode)
+        ? isolate()->builtins()->StoreIC_ArrayLength_Strict()
+        : isolate()->builtins()->StoreIC_ArrayLength();
+    set_target(*stub);
     return receiver->SetProperty(*name, *value, NONE, strict_mode);
   }
 
   // Lookup the property locally in the receiver.
   if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
-    LookupResult lookup;
+    LookupResult lookup(isolate());
 
-    if (LookupForWrite(*receiver, *name, &lookup)) {
+    if (LookupForWrite(receiver, name, &lookup)) {
       // Generate a stub for this store.
       UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     } else {
@@ -1444,16 +1307,15 @@
   }
 
   if (receiver->IsJSGlobalProxy()) {
+    // TODO(ulan): find out why we patch this site even with --no-use-ic
     // Generate a generic stub that goes to the runtime when we see a global
     // proxy as receiver.
-    Code* stub = (strict_mode == kStrictMode)
+    Handle<Code> stub = (strict_mode == kStrictMode)
         ? global_proxy_stub_strict()
         : global_proxy_stub();
-    if (target() != stub) {
-      set_target(stub);
-#ifdef DEBUG
-      TraceIC("StoreIC", name, state, target());
-#endif
+    if (target() != *stub) {
+      set_target(*stub);
+      TRACE_IC("StoreIC", name, state, target());
     }
   }
 
@@ -1468,10 +1330,12 @@
                            Handle<JSObject> receiver,
                            Handle<String> name,
                            Handle<Object> value) {
-  // Skip JSGlobalProxy.
   ASSERT(!receiver->IsJSGlobalProxy());
-
   ASSERT(StoreICableLookup(lookup));
+  // These are not cacheable, so we never see such LookupResults here.
+  ASSERT(lookup->type() != HANDLER);
+  // We only get called for properties or transitions; see StoreICableLookup.
+  ASSERT(lookup->type() != NULL_DESCRIPTOR);
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
@@ -1481,89 +1345,87 @@
   // Compute the code stub for this store; used for rewriting to
   // monomorphic state and making sure that the code stub is in the
   // stub cache.
-  MaybeObject* maybe_code = NULL;
-  Object* code = NULL;
+  Handle<Code> code;
   switch (type) {
-    case FIELD: {
-      maybe_code = isolate()->stub_cache()->ComputeStoreField(
-          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
+    case FIELD:
+      code = isolate()->stub_cache()->ComputeStoreField(name,
+                                                        receiver,
+                                                        lookup->GetFieldIndex(),
+                                                        Handle<Map>::null(),
+                                                        strict_mode);
       break;
-    }
     case MAP_TRANSITION: {
       if (lookup->GetAttributes() != NONE) return;
-      HandleScope scope(isolate());
-      ASSERT(type == MAP_TRANSITION);
       Handle<Map> transition(lookup->GetTransitionMap());
       int index = transition->PropertyIndexFor(*name);
-      maybe_code = isolate()->stub_cache()->ComputeStoreField(
-          *name, *receiver, index, *transition, strict_mode);
+      code = isolate()->stub_cache()->ComputeStoreField(
+          name, receiver, index, transition, strict_mode);
       break;
     }
-    case NORMAL: {
+    case NORMAL:
       if (receiver->IsGlobalObject()) {
         // The stub generated for the global object picks the value directly
         // from the property cell. So the property must be directly on the
         // global object.
         Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
-        JSGlobalPropertyCell* cell =
-            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-        maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
-            *name, *global, cell, strict_mode);
+        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+        code = isolate()->stub_cache()->ComputeStoreGlobal(
+            name, global, cell, strict_mode);
       } else {
         if (lookup->holder() != *receiver) return;
-        maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+        code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
       }
       break;
-    }
     case CALLBACKS: {
-      if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      Handle<Object> callback_object(lookup->GetCallbackObject());
+      if (!callback_object->IsAccessorInfo()) return;
+      Handle<AccessorInfo> callback =
+          Handle<AccessorInfo>::cast(callback_object);
       if (v8::ToCData<Address>(callback->setter()) == 0) return;
-      maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
-          *name, *receiver, callback, strict_mode);
+      code = isolate()->stub_cache()->ComputeStoreCallback(
+          name, receiver, callback, strict_mode);
       break;
     }
-    case INTERCEPTOR: {
+    case INTERCEPTOR:
       ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
-      maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
-          *name, *receiver, strict_mode);
+      code = isolate()->stub_cache()->ComputeStoreInterceptor(
+          name, receiver, strict_mode);
       break;
-    }
-    default:
+    case CONSTANT_FUNCTION:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
+      return;
+    case HANDLER:
+    case NULL_DESCRIPTOR:
+      UNREACHABLE();
       return;
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
-
   // Patch the call site depending on the state of the cache.
   if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
     // Only move to megamorphic if the target changes.
-    if (target() != Code::cast(code)) {
+    if (target() != *code) {
       set_target((strict_mode == kStrictMode)
                    ? megamorphic_stub_strict()
                    : megamorphic_stub());
     }
   } else if (state == MEGAMORPHIC) {
     // Update the stub cache.
-    isolate()->stub_cache()->Set(*name,
-                                 receiver->map(),
-                                 Code::cast(code));
+    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
   }
 
-#ifdef DEBUG
-  TraceIC("StoreIC", name, state, target());
-#endif
+  TRACE_IC("StoreIC", name, state, target());
 }
 
 
-static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
-                                       Map* new_receiver_map) {
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+                                       Handle<Map> new_receiver_map) {
+  ASSERT(!new_receiver_map.is_null());
   for (int current = 0; current < receiver_maps->length(); ++current) {
-    if (receiver_maps->at(current) == new_receiver_map) {
+    if (!receiver_maps->at(current).is_null() &&
+        receiver_maps->at(current).is_identical_to(new_receiver_map)) {
       return false;
     }
   }
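
With a MapHandleList, equality can no longer be a raw pointer compare on the
list entries; is_identical_to compares the objects the handles refer to. The
dedupe itself, sketched over plain pointers where pointee identity is the
point (hypothetical helper):

#include <cstdio>
#include <vector>

struct Map {};

// Add new_map only if no entry already refers to the same Map object; this
// is identity of the pointee, which is what Handle::is_identical_to checks.
static bool AddIfMissing(std::vector<Map*>* maps, Map* new_map) {
  for (std::size_t i = 0; i < maps->size(); ++i) {
    if ((*maps)[i] == new_map) return false;
  }
  maps->push_back(new_map);
  return true;
}

int main() {
  Map a, b;
  std::vector<Map*> maps;
  bool first  = AddIfMissing(&maps, &a);  // true: added
  bool second = AddIfMissing(&maps, &a);  // false: already present
  bool third  = AddIfMissing(&maps, &b);  // true: added
  std::printf("%d %d %d (size %u)\n", first, second, third,
              (unsigned)maps.size());
  return 0;
}
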
@@ -1572,44 +1434,40 @@
 }
 
 
-void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
+void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub,
+                                     MapHandleList* result) {
   ASSERT(stub->is_inline_cache_stub());
-  if (stub == string_stub()) {
-    return result->Add(isolate()->heap()->string_map());
+  if (!string_stub().is_null() && stub.is_identical_to(string_stub())) {
+    return result->Add(isolate()->factory()->string_map());
   } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
     if (stub->ic_state() == MONOMORPHIC) {
-      result->Add(Map::cast(stub->FindFirstMap()));
+      result->Add(Handle<Map>(stub->FindFirstMap()));
     } else {
       ASSERT(stub->ic_state() == MEGAMORPHIC);
       AssertNoAllocation no_allocation;
       int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-      for (RelocIterator it(stub, mask); !it.done(); it.next()) {
+      for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
         RelocInfo* info = it.rinfo();
-        Object* object = info->target_object();
+        Handle<Object> object(info->target_object());
         ASSERT(object->IsMap());
-        result->Add(Map::cast(object));
+        AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
       }
     }
   }
 }
 
 
-MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
-                                  bool is_store,
+Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
+                                  StubKind stub_kind,
                                   StrictModeFlag strict_mode,
-                                  Code* generic_stub) {
+                                  Handle<Code> generic_stub) {
   State ic_state = target()->ic_state();
-  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
-    Code* monomorphic_stub;
-    MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
-                                                     is_store,
-                                                     strict_mode,
-                                                     generic_stub);
-    if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
-
-    return monomorphic_stub;
+  if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) &&
+      !IsTransitionStubKind(stub_kind)) {
+    return ComputeMonomorphicStub(
+        receiver, stub_kind, strict_mode, generic_stub);
   }
-  ASSERT(target() != generic_stub);
+  ASSERT(target() != *generic_stub);
 
   // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
   // via megamorphic stubs, since they don't have a map in their relocation info
@@ -1620,10 +1478,21 @@
 
   // Determine the list of receiver maps that this call site has seen,
   // adding the map that was just encountered.
-  MapList target_receiver_maps;
-  GetReceiverMapsForStub(target(), &target_receiver_maps);
-  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
-    // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
+  MapHandleList target_receiver_maps;
+  Handle<Map> receiver_map(receiver->map());
+  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+    target_receiver_maps.Add(receiver_map);
+  } else {
+    GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+  }
+  bool map_added =
+      AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
+  if (IsTransitionStubKind(stub_kind)) {
+    Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
+    map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
+  }
+  if (!map_added) {
+    // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help; use the generic stub.
     return generic_stub;
   }
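
The surrounding ComputeStub logic boils down to a policy: add the just-seen
map (and, for transition stubs, the transition target) to the set of maps
the call site has seen; if nothing new was added, or the set outgrows the
polymorphism limit, fall back to the generic stub. As a sketch (hypothetical
limit constant; the real code distinguishes more states):

#include <cstdio>
#include <vector>

struct Map {};
enum StubChoice { MONOMORPHIC_STUB, POLYMORPHIC_STUB, GENERIC_STUB };

const std::size_t kMaxPolymorphism = 4;  // stand-in for V8's receiver-map limit

// Decide how to repatch a keyed IC after a miss, given the maps the call
// site has seen so far and the map that just missed.
static StubChoice ChooseStub(std::vector<Map*>* seen, Map* just_seen) {
  bool already_seen = false;
  for (std::size_t i = 0; i < seen->size(); ++i) {
    if ((*seen)[i] == just_seen) { already_seen = true; break; }
  }
  if (already_seen) return GENERIC_STUB;        // same maps keep missing
  seen->push_back(just_seen);
  if (seen->size() > kMaxPolymorphism) return GENERIC_STUB;
  return seen->size() == 1 ? MONOMORPHIC_STUB : POLYMORPHIC_STUB;
}

int main() {
  std::vector<Map*> seen;
  Map m1, m2;
  std::printf("%d\n", ChooseStub(&seen, &m1));  // 0: MONOMORPHIC_STUB
  std::printf("%d\n", ChooseStub(&seen, &m2));  // 1: POLYMORPHIC_STUB
  std::printf("%d\n", ChooseStub(&seen, &m2));  // 2: GENERIC_STUB, no new map
  return 0;
}
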
@@ -1634,47 +1503,29 @@
     return generic_stub;
   }
 
-  PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
-  Code::Flags flags = Code::ComputeFlags(this->kind(),
-                                         MEGAMORPHIC,
-                                         strict_mode);
-  Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
-  // If there is a cached stub, use it.
-  if (!maybe_cached_stub->IsUndefined()) {
-    ASSERT(maybe_cached_stub->IsCode());
-    return Code::cast(maybe_cached_stub);
-  }
-  // Collect MONOMORPHIC stubs for all target_receiver_maps.
-  CodeList handler_ics(target_receiver_maps.length());
-  for (int i = 0; i < target_receiver_maps.length(); ++i) {
-    Map* receiver_map(target_receiver_maps.at(i));
-    MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
-        receiver_map, strict_mode);
-    Code* cached_stub;
-    if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
-    handler_ics.Add(cached_stub);
-  }
-  // Build the MEGAMORPHIC stub.
-  Code* stub;
-  MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
-                                                     &handler_ics,
-                                                     strict_mode);
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
-  if (maybe_update->IsFailure()) return maybe_update;
+  Handle<PolymorphicCodeCache> cache =
+      isolate()->factory()->polymorphic_code_cache();
+  Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, strict_mode);
+  Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  Handle<Code> stub =
+      ComputePolymorphicStub(&target_receiver_maps, strict_mode);
+  PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
   return stub;
 }
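
Note (editorial): the closing pattern of ComputeStub above — look up the seen map set in the polymorphic code cache, compile on a miss, memoize — is the heart of polymorphic IC reuse. A minimal self-contained sketch, with plain ints standing in for V8's Map and Code handles (assumed names, not the V8 API):

    #include <map>
    #include <vector>

    // Sketch only: sorted map ids stand in for a MapHandleList; an int id
    // stands in for a compiled Handle<Code>.
    typedef std::vector<int> MapIds;
    typedef std::map<MapIds, int> PolymorphicCache;

    int GetOrCompileStub(PolymorphicCache* cache, const MapIds& maps) {
      PolymorphicCache::iterator it = cache->find(maps);
      if (it != cache->end()) return it->second;       // cache hit
      int stub = static_cast<int>(cache->size()) + 1;  // "compile" a new stub
      (*cache)[maps] = stub;                           // memoize for this set
      return stub;
    }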
 
 
-MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
-    Map* receiver_map,
+Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+    Handle<Map> receiver_map,
     StrictModeFlag strict_mode) {
   if ((receiver_map->instance_type() & kNotStringTag) == 0) {
-    ASSERT(string_stub() != NULL);
+    ASSERT(!string_stub().is_null());
     return string_stub();
   } else {
     ASSERT(receiver_map->has_dictionary_elements() ||
            receiver_map->has_fast_elements() ||
+           receiver_map->has_fast_smi_only_elements() ||
            receiver_map->has_fast_double_elements() ||
            receiver_map->has_external_array_elements());
     bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1684,47 +1535,78 @@
 }
 
 
-MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
-                                             bool is_store,
+Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
+                                             StubKind stub_kind,
                                              StrictModeFlag strict_mode,
-                                             Code* generic_stub) {
-  Code* result = NULL;
+                                             Handle<Code> generic_stub) {
   if (receiver->HasFastElements() ||
+      receiver->HasFastSmiOnlyElements() ||
       receiver->HasExternalArrayElements() ||
       receiver->HasFastDoubleElements() ||
       receiver->HasDictionaryElements()) {
-    MaybeObject* maybe_stub =
-        isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
-            receiver, is_store, strict_mode);
-    if (!maybe_stub->To(&result)) return maybe_stub;
+    return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
+        receiver, stub_kind, strict_mode);
   } else {
-    result = generic_stub;
+    return generic_stub;
   }
-  return result;
 }
 
 
-MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
+Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
+                                            StubKind stub_kind) {
+  switch (stub_kind) {
+    case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
+    case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
+      return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
+    case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
+      return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
+    default:
+      UNREACHABLE();
+      return Handle<Map>::null();
+  }
+}
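
Note (editorial): ComputeTransitionedMap walks a one-way elements-kind lattice: FAST_SMI_ONLY may widen to FAST_DOUBLE or FAST, FAST_DOUBLE may widen to FAST, and kinds never narrow. A minimal sketch of the widening rule, using illustrative names rather than V8 types:

    enum KindSketch { SMI_ONLY, DOUBLE, OBJECT };

    // Which kind the backing store must widen to so the new value fits.
    KindSketch WidenFor(KindSketch current, bool is_smi, bool is_heap_number) {
      if (current == SMI_ONLY && is_heap_number) return DOUBLE;
      if (current == SMI_ONLY && !is_smi) return OBJECT;
      if (current == DOUBLE && !is_smi && !is_heap_number) return OBJECT;
      return current;  // the value is already representable; no transition
    }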
+
+
+Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
     bool is_js_array,
     ElementsKind elements_kind) {
-  return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  return KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
 }
 
 
-MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
-    MapList* receiver_maps,
-    CodeList* targets,
-    StrictModeFlag strict_mode) {
-  Object* object;
-  KeyedStoreStubCompiler compiler(strict_mode);
-  MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
-                                                             targets);
-  if (!maybe_code->ToObject(&object)) return maybe_code;
+Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps,
+                                                  StrictModeFlag strict_mode) {
+  // Collect MONOMORPHIC stubs for all target_receiver_maps.
+  CodeHandleList handler_ics(receiver_maps->length());
+  MapHandleList transitioned_maps(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map(receiver_maps->at(i));
+    Handle<Code> cached_stub;
+    Handle<Map> transitioned_map =
+        receiver_map->FindTransitionedMap(receiver_maps);
+    if (!transitioned_map.is_null()) {
+      cached_stub = ElementsTransitionAndStoreStub(
+          receiver_map->elements_kind(),  // original elements_kind
+          transitioned_map->elements_kind(),
+          receiver_map->instance_type() == JS_ARRAY_TYPE,  // is_js_array
+          strict_mode).GetCode();
+    } else {
+      cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
+                                                          strict_mode);
+    }
+    ASSERT(!cached_stub.is_null());
+    handler_ics.Add(cached_stub);
+    transitioned_maps.Add(transitioned_map);
+  }
+  KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+  Handle<Code> code = compiler.CompileStorePolymorphic(
+      receiver_maps, &handler_ics, &transitioned_maps);
   isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
-  PROFILE(isolate(), CodeCreateEvent(
-      Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
-      Code::cast(object), 0));
-  return object;
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0));
+  return code;
 }
 
 
@@ -1737,6 +1619,12 @@
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
+    // Handle proxies.
+    if (object->IsJSProxy()) {
+      return JSProxy::cast(*object)->SetProperty(
+          *name, *value, NONE, strict_mode);
+    }
+
     // If the object is undefined or null it's illegal to try to set any
     // properties on it; throw a TypeError in that case.
     if (object->IsUndefined() || object->IsNull()) {
@@ -1750,19 +1638,17 @@
     // Check if the given name is an array index.
     uint32_t index;
     if (name->AsArrayIndex(&index)) {
-      HandleScope scope(isolate());
       Handle<Object> result = SetElement(receiver, index, value, strict_mode);
-      if (result.is_null()) return Failure::Exception();
+      RETURN_IF_EMPTY_HANDLE(isolate(), result);
       return *value;
     }
 
-    // Lookup the property locally in the receiver.
-    LookupResult lookup;
-    receiver->LocalLookup(*name, &lookup);
-
     // Update inline cache and stub cache.
-    if (FLAG_use_ic) {
-      UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+    if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+      LookupResult lookup(isolate());
+      if (LookupForWrite(receiver, name, &lookup)) {
+        UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+      }
     }
 
     // Set the property.
@@ -1775,33 +1661,36 @@
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic) {
-    Code* stub = (strict_mode == kStrictMode)
+    Handle<Code> stub = (strict_mode == kStrictMode)
         ? generic_stub_strict()
         : generic_stub();
     if (object->IsJSObject()) {
-      JSObject* receiver = JSObject::cast(*object);
-      Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
-      Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
-      if (elements_map == heap->non_strict_arguments_elements_map()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->elements()->map() ==
+          isolate()->heap()->non_strict_arguments_elements_map()) {
         stub = non_strict_arguments_stub();
       } else if (!force_generic) {
-        if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
-          HandleScope scope(isolate());
-          MaybeObject* maybe_stub = ComputeStub(receiver,
-                                                true,
-                                                strict_mode,
-                                                stub);
-          stub = maybe_stub->IsFailure() ?
-              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+        if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+          StubKind stub_kind = STORE_NO_TRANSITION;
+          if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+            if (value->IsHeapNumber()) {
+              stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE;
+            } else if (value->IsHeapObject()) {
+              stub_kind = STORE_TRANSITION_SMI_TO_OBJECT;
+            }
+          } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) {
+            if (!value->IsSmi() && !value->IsHeapNumber()) {
+              stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT;
+            }
+          }
+          stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
         }
       }
     }
-    if (stub != NULL) set_target(stub);
+    if (!stub.is_null()) set_target(*stub);
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedStoreIC", key, state, target());
-#endif
+  TRACE_IC("KeyedStoreIC", key, state, target());
 
   // Set the property.
   return Runtime::SetObjectProperty(
@@ -1815,15 +1704,12 @@
                                 Handle<JSObject> receiver,
                                 Handle<String> name,
                                 Handle<Object> value) {
-  // Skip JSGlobalProxy.
-  if (receiver->IsJSGlobalProxy()) return;
-
-  // Bail out if we didn't find a result.
-  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
-
-  // If the property is read-only, we leave the IC in its current
-  // state.
-  if (lookup->IsReadOnly()) return;
+  ASSERT(!receiver->IsJSGlobalProxy());
+  ASSERT(StoreICableLookup(lookup));
+  // These are not cacheable, so we never see such LookupResults here.
+  ASSERT(lookup->type() != HANDLER);
+  // We only get called for properties or transitions; see StoreICableLookup.
+  ASSERT(lookup->type() != NULL_DESCRIPTOR);
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
@@ -1833,75 +1719,68 @@
   // Compute the code stub for this store; used for rewriting to
   // monomorphic state and making sure that the code stub is in the
   // stub cache.
-  MaybeObject* maybe_code = NULL;
-  Object* code = NULL;
+  Handle<Code> code;
 
   switch (type) {
-    case FIELD: {
-      maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
-          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
+    case FIELD:
+      code = isolate()->stub_cache()->ComputeKeyedStoreField(
+          name, receiver, lookup->GetFieldIndex(),
+          Handle<Map>::null(), strict_mode);
       break;
-    }
-    case MAP_TRANSITION: {
+    case MAP_TRANSITION:
       if (lookup->GetAttributes() == NONE) {
-        HandleScope scope(isolate());
-        ASSERT(type == MAP_TRANSITION);
         Handle<Map> transition(lookup->GetTransitionMap());
         int index = transition->PropertyIndexFor(*name);
-        maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
-            *name, *receiver, index, *transition, strict_mode);
+        code = isolate()->stub_cache()->ComputeKeyedStoreField(
+            name, receiver, index, transition, strict_mode);
         break;
       }
       // fall through.
-    }
-    default: {
+    case NORMAL:
+    case CONSTANT_FUNCTION:
+    case CALLBACKS:
+    case INTERCEPTOR:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
       // Always rewrite to the generic case so that we do not
       // repeatedly try to rewrite.
-      maybe_code = (strict_mode == kStrictMode)
+      code = (strict_mode == kStrictMode)
           ? generic_stub_strict()
           : generic_stub();
       break;
-    }
+    case HANDLER:
+    case NULL_DESCRIPTOR:
+      UNREACHABLE();
+      return;
   }
 
-  // If we're unable to compute the stub (not enough memory left), we
-  // simply avoid updating the caches.
-  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+  ASSERT(!code.is_null());
 
   // Patch the call site depending on the state of the cache.  Make
   // sure to always rewrite from monomorphic to megamorphic.
   ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
   if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
-    set_target(Code::cast(code));
+    set_target(*code);
   } else if (state == MONOMORPHIC) {
     set_target((strict_mode == kStrictMode)
-                 ? megamorphic_stub_strict()
-                 : megamorphic_stub());
+                 ? *megamorphic_stub_strict()
+                 : *megamorphic_stub());
   }
 
-#ifdef DEBUG
-  TraceIC("KeyedStoreIC", name, state, target());
-#endif
+  TRACE_IC("KeyedStoreIC", name, state, target());
 }
 
 
+#undef TRACE_IC
+
+
 // ----------------------------------------------------------------------------
 // Static IC stub generators.
 //
 
-static JSFunction* CompileFunction(Isolate* isolate,
-                                   JSFunction* function) {
-  // Compile now with optimization.
-  HandleScope scope(isolate);
-  Handle<JSFunction> function_handle(function, isolate);
-  CompileLazy(function_handle, CLEAR_EXCEPTION);
-  return *function_handle;
-}
-
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CallIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1910,45 +1789,46 @@
                                               extra_ic_state,
                                               args.at<Object>(0),
                                               args.at<String>(1));
-  Object* result;
-  if (!maybe_result->ToObject(&result)) return maybe_result;
+  // Result could be a function or a failure.
+  JSFunction* raw_function = NULL;
+  if (!maybe_result->To(&raw_function)) return maybe_result;
 
   // The first time the inline cache is updated may be the first time the
-  // function it references gets called.  If the function was lazily compiled
+  // function it references gets called.  If the function is lazily compiled
   // then the first call will trigger a compilation.  We check for this case
   // and we do the compilation immediately, instead of waiting for the stub
-  // currently attached to the JSFunction object to trigger compilation.  We
-  // do this in the case where we know that the inline cache is inside a loop,
-  // because then we know that we want to optimize the function.
-  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
-    return result;
-  }
-  return CompileFunction(isolate, JSFunction::cast(result));
+  // currently attached to the JSFunction object to trigger compilation.
+  if (raw_function->is_compiled()) return raw_function;
+
+  Handle<JSFunction> function(raw_function);
+  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  return *function;
 }
 
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   KeyedCallIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
-  Object* result;
-  { MaybeObject* maybe_result =
+  MaybeObject* maybe_result =
       ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  // Result could be a function or a failure.
+  JSFunction* raw_function = NULL;
+  if (!maybe_result->To(&raw_function)) return maybe_result;
 
-  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
-    return result;
-  }
-  return CompileFunction(isolate, JSFunction::cast(result));
+  if (raw_function->is_compiled()) return raw_function;
+
+  Handle<JSFunction> function(raw_function);
+  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  return *function;
 }
 
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   LoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1958,7 +1838,7 @@
 
 // Used from ic-<arch>.cc
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1967,7 +1847,7 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1977,7 +1857,7 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   StoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2046,7 +1926,7 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2080,7 +1960,7 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
-  NoHandleAllocation na;
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2402,7 +2282,7 @@
   Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
 
   bool caught_exception;
-  Object** builtin_args[] = { right.location() };
+  Handle<Object> builtin_args[] = { right };
   Handle<Object> result = Execution::Call(builtin_function,
                                           left,
                                           ARRAY_SIZE(builtin_args),
diff --git a/src/ic.h b/src/ic.h
index ece5be9..81aa6b7 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -198,47 +198,60 @@
   class Contextual: public BitField<bool, 0, 1> {};
   class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
 
- protected:
-  CallICBase(Code::Kind kind, Isolate* isolate)
-      : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
-
- public:
+  // Returns a JSFunction or a Failure.
   MUST_USE_RESULT MaybeObject* LoadFunction(State state,
                                             Code::ExtraICState extra_ic_state,
                                             Handle<Object> object,
                                             Handle<String> name);
 
  protected:
-  Code::Kind kind_;
+  CallICBase(Code::Kind kind, Isolate* isolate)
+      : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
 
   bool TryUpdateExtraICState(LookupResult* lookup,
                              Handle<Object> object,
                              Code::ExtraICState* extra_ic_state);
 
-  MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
-      LookupResult* lookup,
-      State state,
-      Code::ExtraICState extra_ic_state,
-      Handle<Object> object,
-      Handle<String> name);
+  // Compute a monomorphic stub if possible, otherwise return a null handle.
+  Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
+                                      State state,
+                                      Code::ExtraICState extra_state,
+                                      Handle<Object> object,
+                                      Handle<String> name);
 
-  // Update the inline cache and the global stub cache based on the
-  // lookup result.
+  // Update the inline cache and the global stub cache based on the lookup
+  // result.
   void UpdateCaches(LookupResult* lookup,
                     State state,
                     Code::ExtraICState extra_ic_state,
                     Handle<Object> object,
                     Handle<String> name);
 
-  // Returns a JSFunction if the object can be called as a function,
-  // and patches the stack to be ready for the call.
-  // Otherwise, it returns the undefined value.
-  Object* TryCallAsFunction(Object* object);
+  // Returns a JSFunction if the object can be called as a function, and
+  // patches the stack to be ready for the call.  Otherwise, it returns the
+  // undefined value.
+  Handle<Object> TryCallAsFunction(Handle<Object> object);
 
   void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
 
   static void Clear(Address address, Code* target);
 
+  // Platform-specific code generation functions used by both call and
+  // keyed call.
+  static void GenerateMiss(MacroAssembler* masm,
+                           int argc,
+                           IC::UtilityId id,
+                           Code::ExtraICState extra_state);
+
+  static void GenerateNormal(MacroAssembler* masm, int argc);
+
+  static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                            int argc,
+                                            Code::Kind kind,
+                                            Code::ExtraICState extra_state);
+
+  Code::Kind kind_;
+
   friend class IC;
 };
 
@@ -252,16 +265,24 @@
   // Code generator routines.
   static void GenerateInitialize(MacroAssembler* masm,
                                  int argc,
-                                 Code::ExtraICState extra_ic_state) {
-    GenerateMiss(masm, argc, extra_ic_state);
+                                 Code::ExtraICState extra_state) {
+    GenerateMiss(masm, argc, extra_state);
   }
+
   static void GenerateMiss(MacroAssembler* masm,
                            int argc,
-                           Code::ExtraICState extra_ic_state);
+                           Code::ExtraICState extra_state) {
+    CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
+  }
+
   static void GenerateMegamorphic(MacroAssembler* masm,
                                   int argc,
                                   Code::ExtraICState extra_ic_state);
-  static void GenerateNormal(MacroAssembler* masm, int argc);
+
+  static void GenerateNormal(MacroAssembler* masm, int argc) {
+    CallICBase::GenerateNormal(masm, argc);
+    GenerateMiss(masm, argc, Code::kNoExtraICState);
+  }
 };
 
 
@@ -280,7 +301,12 @@
   static void GenerateInitialize(MacroAssembler* masm, int argc) {
     GenerateMiss(masm, argc);
   }
-  static void GenerateMiss(MacroAssembler* masm, int argc);
+
+  static void GenerateMiss(MacroAssembler* masm, int argc) {
+    CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
+                             Code::kNoExtraICState);
+  }
+
   static void GenerateMegamorphic(MacroAssembler* masm, int argc);
   static void GenerateNormal(MacroAssembler* masm, int argc);
   static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
@@ -321,17 +347,15 @@
                     Handle<String> name);
 
   // Stub accessors.
-  Code* megamorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kLoadIC_Megamorphic);
+  Handle<Code> megamorphic_stub() {
+    return isolate()->builtins()->LoadIC_Megamorphic();
   }
   static Code* initialize_stub() {
     return Isolate::Current()->builtins()->builtin(
         Builtins::kLoadIC_Initialize);
   }
-  Code* pre_monomorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kLoadIC_PreMonomorphic);
+  Handle<Code> pre_monomorphic_stub() {
+    return isolate()->builtins()->LoadIC_PreMonomorphic();
   }
 
   static void Clear(Address address, Code* target);
@@ -342,41 +366,53 @@
 
 class KeyedIC: public IC {
  public:
+  enum StubKind {
+    LOAD,
+    STORE_NO_TRANSITION,
+    STORE_TRANSITION_SMI_TO_OBJECT,
+    STORE_TRANSITION_SMI_TO_DOUBLE,
+    STORE_TRANSITION_DOUBLE_TO_OBJECT
+  };
   explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
   virtual ~KeyedIC() {}
 
-  virtual MaybeObject* GetElementStubWithoutMapCheck(
+  virtual Handle<Code> GetElementStubWithoutMapCheck(
       bool is_js_array,
       ElementsKind elements_kind) = 0;
 
  protected:
-  virtual Code* string_stub() {
-    return NULL;
+  virtual Handle<Code> string_stub() {
+    return Handle<Code>::null();
   }
 
   virtual Code::Kind kind() const = 0;
 
-  MaybeObject* ComputeStub(JSObject* receiver,
-                           bool is_store,
+  Handle<Code> ComputeStub(Handle<JSObject> receiver,
+                           StubKind stub_kind,
                            StrictModeFlag strict_mode,
-                           Code* default_stub);
+                           Handle<Code> default_stub);
 
-  virtual MaybeObject* ConstructMegamorphicStub(
-      MapList* receiver_maps,
-      CodeList* targets,
-      StrictModeFlag strict_mode) = 0;
+  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+                                              StrictModeFlag strict_mode) = 0;
 
- private:
-  void GetReceiverMapsForStub(Code* stub, MapList* result);
-
-  MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
-      Map* receiver_map,
+  Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
+      Handle<Map> receiver_map,
       StrictModeFlag strict_mode);
 
-  MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
-                                      bool is_store,
+ private:
+  void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
+
+  Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
+                                      StubKind stub_kind,
                                       StrictModeFlag strict_mode,
-                                      Code* default_stub);
+                                      Handle<Code> default_stub);
+
+  Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
+                                     StubKind stub_kind);
+
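+  // Relies on StubKind's declaration order: every transition kind is
+  // declared after STORE_NO_TRANSITION.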
+  static bool IsTransitionStubKind(StubKind stub_kind) {
+    return stub_kind > STORE_NO_TRANSITION;
+  }
 };
 
 
@@ -412,21 +448,18 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
-  virtual MaybeObject* GetElementStubWithoutMapCheck(
+  virtual Handle<Code> GetElementStubWithoutMapCheck(
       bool is_js_array,
       ElementsKind elements_kind);
 
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
 
-  virtual MaybeObject* ConstructMegamorphicStub(
-      MapList* receiver_maps,
-      CodeList* targets,
-      StrictModeFlag strict_mode);
+  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+                                              StrictModeFlag strict_mode);
 
-  virtual Code* string_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_String);
+  virtual Handle<Code> string_stub() {
+    return isolate()->builtins()->KeyedLoadIC_String();
   }
 
  private:
@@ -441,25 +474,20 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedLoadIC_Initialize);
   }
-  Code* megamorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_Generic);
+  Handle<Code> megamorphic_stub() {
+    return isolate()->builtins()->KeyedLoadIC_Generic();
   }
-  Code* generic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_Generic);
+  Handle<Code> generic_stub() {
+    return isolate()->builtins()->KeyedLoadIC_Generic();
   }
-  Code* pre_monomorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_PreMonomorphic);
+  Handle<Code> pre_monomorphic_stub() {
+    return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
   }
-  Code* indexed_interceptor_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_IndexedInterceptor);
+  Handle<Code> indexed_interceptor_stub() {
+    return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
   }
-  Code* non_strict_arguments_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_NonStrictArguments);
+  Handle<Code> non_strict_arguments_stub() {
+    return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
   }
 
   static void Clear(Address address, Code* target);
@@ -524,13 +552,11 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kStoreIC_Initialize_Strict);
   }
-  Code* global_proxy_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kStoreIC_GlobalProxy);
+  Handle<Code> global_proxy_stub() {
+    return isolate()->builtins()->StoreIC_GlobalProxy();
   }
-  Code* global_proxy_stub_strict() {
-    return isolate()->builtins()->builtin(
-        Builtins::kStoreIC_GlobalProxy_Strict);
+  Handle<Code> global_proxy_stub_strict() {
+    return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
   }
 
   static void Clear(Address address, Code* target);
@@ -562,18 +588,18 @@
                                          StrictModeFlag strict_mode);
   static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
   static void GenerateNonStrictArguments(MacroAssembler* masm);
+  static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
+  static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
 
-  virtual MaybeObject* GetElementStubWithoutMapCheck(
+  virtual Handle<Code> GetElementStubWithoutMapCheck(
       bool is_js_array,
       ElementsKind elements_kind);
 
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
 
-  virtual MaybeObject* ConstructMegamorphicStub(
-      MapList* receiver_maps,
-      CodeList* targets,
-      StrictModeFlag strict_mode);
+  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
+                                              StrictModeFlag strict_mode);
 
  private:
   // Update the inline cache.
@@ -596,29 +622,24 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Initialize);
   }
-  Code* megamorphic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic);
-  }
   static Code* initialize_stub_strict() {
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Initialize_Strict);
   }
-  Code* megamorphic_stub_strict() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic_Strict);
+  Handle<Code> megamorphic_stub() {
+    return isolate()->builtins()->KeyedStoreIC_Generic();
   }
-  Code* generic_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic);
+  Handle<Code> megamorphic_stub_strict() {
+    return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
   }
-  Code* generic_stub_strict() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_Generic_Strict);
+  Handle<Code> generic_stub() {
+    return isolate()->builtins()->KeyedStoreIC_Generic();
   }
-  Code* non_strict_arguments_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedStoreIC_NonStrictArguments);
+  Handle<Code> generic_stub_strict() {
+    return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+  }
+  Handle<Code> non_strict_arguments_stub() {
+    return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
   }
 
   static void Clear(Address address, Code* target);
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
new file mode 100644
index 0000000..7ae2c99
--- /dev/null
+++ b/src/incremental-marking-inl.h
@@ -0,0 +1,133 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_INL_H_
+#define V8_INCREMENTAL_MARKING_INL_H_
+
+#include "incremental-marking.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
+                                         Object** slot,
+                                         Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+
+    // Object is either grey or white.  It will be scanned if it survives.
+    return false;
+  }
+  return true;
+}
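
Note (editorial): BaseRecordWrite preserves the tri-color invariant — no black object may point to a white one, or the white target could be swept while still reachable. Instead of greying the target, the barrier reverts the black source to grey so it gets rescanned. A minimal self-contained sketch (illustrative, not V8 types):

    enum Color { WHITE, GREY, BLACK };

    // Returns true when the target is already marked (the caller may still
    // need to record the slot for compaction, but marking needs no fix-up).
    bool BarrierSketch(Color* source, Color target) {
      if (target != WHITE) return true;
      if (*source == BLACK) *source = GREY;  // source will be rescanned
      return false;
    }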
+
+
+void IncrementalMarking::RecordWrite(HeapObject* obj,
+                                     Object** slot,
+                                     Object* value) {
+  if (IsMarking() && value->NonFailureIsHeapObject()) {
+    RecordWriteSlow(obj, slot, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+                                                Object** slot,
+                                                Code* value) {
+  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
+                                             RelocInfo* rinfo,
+                                             Object* value) {
+  if (IsMarking() && value->NonFailureIsHeapObject()) {
+    RecordWriteIntoCodeSlow(obj, rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+  if (IsMarking()) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+  }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+                                               MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  ASSERT(obj->Size() >= 2 * kPointerSize);
+  ASSERT(IsMarking());
+  Marking::BlackToGrey(mark_bit);
+  int obj_size = obj->Size();
+  MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
+  bytes_scanned_ -= obj_size;
+  int64_t old_bytes_rescanned = bytes_rescanned_;
+  bytes_rescanned_ = old_bytes_rescanned + obj_size;
+  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+      // If we have queued twice the heap size for rescanning then we are
+      // going around in circles, scanning the same objects again and again
+      // as the program mutates the heap faster than we can incrementally
+      // trace it.  In this case we switch to non-incremental marking in
+      // order to finish off this marking phase.
+      if (FLAG_trace_gc) {
+        PrintF("Hurrying incremental marking because of lack of progress\n");
+      }
+      allocation_marking_factor_ = kMaxAllocationMarkingFactor;
+    }
+  }
+
+  marking_deque_.UnshiftGrey(obj);
+}
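
Note (editorial): the (bytes_rescanned_ >> 20) comparison above rate-limits the progress check to once per rescanned megabyte; the check itself hurries marking to completion once more than twice the promoted-space size has been re-queued. Isolated, with illustrative numbers:

    #include <stdint.h>

    // Sketch: with a 50 MB promoted space, hurrying starts once more than
    // 100 MB worth of black objects have been reverted to grey.
    bool ShouldHurrySketch(int64_t bytes_rescanned,
                           int64_t promoted_space_size) {
      return bytes_rescanned > 2 * promoted_space_size;
    }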
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+  WhiteToGrey(obj, mark_bit);
+  marking_deque_.PushGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
new file mode 100644
index 0000000..c866346
--- /dev/null
+++ b/src/incremental-marking.cc
@@ -0,0 +1,924 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "incremental-marking.h"
+
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+
+IncrementalMarking::IncrementalMarking(Heap* heap)
+    : heap_(heap),
+      state_(STOPPED),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
+      steps_count_(0),
+      steps_took_(0),
+      longest_step_(0.0),
+      old_generation_space_available_at_start_of_incremental_(0),
+      old_generation_space_used_at_start_of_incremental_(0),
+      steps_count_since_last_gc_(0),
+      steps_took_since_last_gc_(0),
+      should_hurry_(false),
+      allocation_marking_factor_(0),
+      allocated_(0),
+      no_marking_scope_depth_(0) {
+}
+
+
+void IncrementalMarking::TearDown() {
+  delete marking_deque_memory_;
+}
+
+
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+                                         Object** slot,
+                                         Object* value) {
+  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned, so we need to record the slot.
+      heap_->mark_compact_collector()->RecordSlot(
+          HeapObject::RawField(obj, 0), slot, value);
+    }
+  }
+}
+
+
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
+                                             Object* value,
+                                             Isolate* isolate) {
+  ASSERT(obj->IsHeapObject());
+
+  // Fast cases should already be covered by RecordWriteStub.
+  ASSERT(value->IsHeapObject());
+  ASSERT(!value->IsHeapNumber());
+  ASSERT(!value->IsString() ||
+         value->IsConsString() ||
+         value->IsSlicedString());
+  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
+
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(!marking->is_compacting_);
+  marking->RecordWrite(obj, NULL, value);
+}
+
+
+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                                          Object** slot,
+                                                          Isolate* isolate) {
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(marking->is_compacting_);
+  marking->RecordWrite(obj, slot, *slot);
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Code* host,
+                                               Address pc,
+                                               HeapObject* value) {
+  if (IsMarking()) {
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
+  if (IsMarking()) {
+    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
+        GcSafeFindCodeForInnerPointer(pc);
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
+                                                    Object** slot,
+                                                    Code* value) {
+  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+    ASSERT(slot != NULL);
+    heap_->mark_compact_collector()->
+        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+                                                 RelocInfo* rinfo,
+                                                 Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+    // Object is either grey or white.  It will be scanned if it survives.
+    return;
+  }
+
+  if (is_compacting_) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+                                                       Code::cast(value));
+    }
+  }
+}
+
+
+class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
+ public:
+  IncrementalMarkingMarkingVisitor(Heap* heap,
+                                   IncrementalMarking* incremental_marking)
+      : heap_(heap),
+        incremental_marking_(incremental_marking) {
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    if (target->NonFailureIsHeapObject()) {
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+      MarkObject(target);
+    }
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+            rinfo->IsPatchedReturnSequence()) ||
+           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+            rinfo->IsPatchedDebugBreakSlotSequence()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitCodeEntry(Address entry_address) {
+    Object* target = Code::GetObjectFromEntryAddress(entry_address);
+    heap_->mark_compact_collector()->
+        RecordCodeEntrySlot(entry_address, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitPointer(Object** p) {
+    Object* obj = *p;
+    if (obj->NonFailureIsHeapObject()) {
+      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
+      MarkObject(obj);
+    }
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->NonFailureIsHeapObject()) {
+        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
+        MarkObject(obj);
+      }
+    }
+  }
+
+ private:
+  // Mark object pointed to by p.
+  INLINE(void MarkObject(Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
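+    // Data-only objects carry no out-pointers, so they can be marked black
+    // immediately and never need to be pushed for scanning.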
+    if (mark_bit.data_only()) {
+      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+        MemoryChunk::IncrementLiveBytes(heap_object->address(),
+                                        heap_object->Size());
+      }
+    } else if (Marking::IsWhite(mark_bit)) {
+      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+    }
+  }
+
+  Heap* heap_;
+  IncrementalMarking* incremental_marking_;
+};
+
+
+class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+ public:
+  IncrementalMarkingRootMarkingVisitor(Heap* heap,
+                                       IncrementalMarking* incremental_marking)
+      : heap_(heap),
+        incremental_marking_(incremental_marking) {
+  }
+
+  void VisitPointer(Object** p) {
+    MarkObjectByPointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    Object* obj = *p;
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+        MemoryChunk::IncrementLiveBytes(heap_object->address(),
+                                        heap_object->Size());
+      }
+    } else {
+      if (Marking::IsWhite(mark_bit)) {
+        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+      }
+    }
+  }
+
+  Heap* heap_;
+  IncrementalMarking* incremental_marking_;
+};
+
+
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking,
+                                              bool is_compacting) {
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+    // It's difficult to filter out slots recorded for large objects.
+    if (chunk->owner()->identity() == LO_SPACE &&
+        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+        is_compacting) {
+      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+  } else if (chunk->owner()->identity() == CELL_SPACE ||
+             chunk->scan_on_scavenge()) {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+                                              bool is_marking) {
+  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, false, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    NewSpace* space) {
+  NewSpacePageIterator it(space);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, false, false);
+    lop = lop->next_page();
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, true, is_compacting_);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, true);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier() {
+  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
+  ActivateIncrementalWriteBarrier(heap_->old_data_space());
+  ActivateIncrementalWriteBarrier(heap_->cell_space());
+  ActivateIncrementalWriteBarrier(heap_->map_space());
+  ActivateIncrementalWriteBarrier(heap_->code_space());
+  ActivateIncrementalWriteBarrier(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, true, is_compacting_);
+    lop = lop->next_page();
+  }
+}
+
+
+bool IncrementalMarking::WorthActivating() {
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  // TODO(gc): Consider setting this to some low threshold so that some
+  // debug tests run with incremental marking and some without.
+  static const intptr_t kActivationThreshold = 0;
+#endif
+
+  return !FLAG_expose_gc &&
+      FLAG_incremental_marking &&
+      !Serializer::enabled() &&
+      heap_->PromotedSpaceSize() > kActivationThreshold;
+}
+
+
+void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
+  ASSERT(RecordWriteStub::GetMode(stub) ==
+         RecordWriteStub::STORE_BUFFER_ONLY);
+
+  if (!IsMarking()) {
+    // The stub is initially generated in STORE_BUFFER_ONLY mode, so we
+    // don't need to do anything if incremental marking is not active.
+  } else if (IsCompacting()) {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
+  } else {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
+  }
+}
+
+
+static void PatchIncrementalMarkingRecordWriteStubs(
+    Heap* heap, RecordWriteStub::Mode mode) {
+  NumberDictionary* stubs = heap->code_stubs();
+
+  int capacity = stubs->Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = stubs->KeyAt(i);
+    if (stubs->IsKey(k)) {
+      uint32_t key = NumberToUint32(k);
+
+      if (CodeStub::MajorKeyFromKey(key) ==
+          CodeStub::RecordWrite) {
+        Object* e = stubs->ValueAt(i);
+        if (e->IsCode()) {
+          RecordWriteStub::Patch(Code::cast(e), mode);
+        }
+      }
+    }
+  }
+}
+
+
+void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+  }
+}
+
+
+void IncrementalMarking::UncommitMarkingDeque() {
+  if (state_ == STOPPED && marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
+void IncrementalMarking::Start() {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start\n");
+  }
+  ASSERT(FLAG_incremental_marking);
+  ASSERT(state_ == STOPPED);
+
+  ResetStepCounters();
+
+  if (heap_->old_pointer_space()->IsSweepingComplete() &&
+      heap_->old_data_space()->IsSweepingComplete()) {
+    StartMarking(ALLOW_COMPACTION);
+  } else {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Start sweeping.\n");
+    }
+    state_ = SWEEPING;
+  }
+
+  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+}
+
+
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
+    if (Marking::IsBlack(mark_bit)) {
+      MemoryChunk::IncrementLiveBytes(heap_obj->address(),
+                                      -heap_obj->Size());
+    }
+    Marking::AnyToGrey(mark_bit);
+  }
+}
+
+
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start marking\n");
+  }
+
+  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
+      heap_->mark_compact_collector()->StartCompaction();
+
+  state_ = MARKING;
+
+  RecordWriteStub::Mode mode = is_compacting_ ?
+      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+
+  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
+
+  EnsureMarkingDequeIsCommitted();
+
+  // Initialize marking stack.
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_->size();
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
+
+  ActivateIncrementalWriteBarrier();
+
+#ifdef DEBUG
+  // Marking bits are cleared by the sweeper.
+  if (FLAG_verify_heap) {
+    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
+  }
+#endif
+
+  heap_->CompletelyClearInstanceofCache();
+  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    // We will mark the cache black with a separate pass
+    // when we finish marking.
+    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
+  }
+
+  // Mark strong roots grey.
+  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
+  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+
+  // Ready to start incremental marking.
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Running\n");
+  }
+}
+
+
+void IncrementalMarking::PrepareForScavenge() {
+  if (!IsMarking()) return;
+  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
+                          heap_->new_space()->FromSpaceEnd());
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+  if (!IsMarking()) return;
+
+  int current = marking_deque_.bottom();
+  int mask = marking_deque_.mask();
+  int limit = marking_deque_.top();
+  HeapObject** array = marking_deque_.array();
+  int new_top = current;
+
+  Map* filler_map = heap_->one_pointer_filler_map();
+
+  while (current != limit) {
+    HeapObject* obj = array[current];
+    ASSERT(obj->IsHeapObject());
+    current = ((current + 1) & mask);
+    if (heap_->InNewSpace(obj)) {
+      MapWord map_word = obj->map_word();
+      if (map_word.IsForwardingAddress()) {
+        HeapObject* dest = map_word.ToForwardingAddress();
+        array[new_top] = dest;
+        new_top = ((new_top + 1) & mask);
+        ASSERT(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        ASSERT(Marking::IsGrey(mark_bit) ||
+               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+      }
+    } else if (obj->map() != filler_map) {
+      // Skip one-word filler objects that appear on the
+      // stack when we perform an in-place array shift.
+      array[new_top] = obj;
+      new_top = ((new_top + 1) & mask);
+      ASSERT(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(Marking::IsGrey(mark_bit) ||
+             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+    }
+  }
+  marking_deque_.set_top(new_top);
+
+  steps_took_since_last_gc_ = 0;
+  steps_count_since_last_gc_ = 0;
+  longest_step_ = 0.0;
+}
+
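+// Editorial sketch of the index arithmetic above (stand-alone, with
+// hypothetical names): the marking deque is a power-of-two ring buffer, so
+// indices wrap by masking instead of a modulo:
+//
+//   const int kCapacity = 8;            // must be a power of two
+//   const int kMask = kCapacity - 1;    // same role as marking_deque_.mask()
+//   int idx = kCapacity - 1;
+//   idx = (idx + 1) & kMask;            // wraps to 0
+//
+// The in-place compaction is safe because new_top trails current and must
+// never wrap around to bottom(), which is what the ASSERTs verify.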
+
+void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
+  v->VisitPointers(
+      HeapObject::RawField(
+          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
+      HeapObject::RawField(
+          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
+
+  MarkCompactCollector* collector = heap_->mark_compact_collector();
+  for (int idx = Context::FIRST_WEAK_SLOT;
+       idx < Context::GLOBAL_CONTEXT_SLOTS;
+       ++idx) {
+    Object** slot =
+        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
+    collector->RecordSlot(slot, slot, *slot);
+  }
+}
+
+
+void IncrementalMarking::Hurry() {
+  if (state() == MARKING) {
+    double start = 0.0;
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Hurry\n");
+      start = OS::TimeCurrentMillis();
+    }
+    // TODO(gc) Hurry() can mark the objects it encounters black, because
+    // the mutator is stopped while it runs.
+    Map* filler_map = heap_->one_pointer_filler_map();
+    Map* global_context_map = heap_->global_context_map();
+    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one-word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) {
+        continue;
+      } else if (map == global_context_map) {
+        // Global contexts have weak fields.
+        VisitGlobalContext(Context::cast(obj), &marking_visitor);
+      } else {
+        obj->Iterate(&marking_visitor);
+      }
+
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(!Marking::IsBlack(mark_bit));
+      Marking::MarkBlack(mark_bit);
+      MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+    }
+    state_ = COMPLETE;
+    if (FLAG_trace_incremental_marking) {
+      double end = OS::TimeCurrentMillis();
+      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+             static_cast<int>(end - start));
+    }
+  }
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
+    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
+    MemoryChunk::IncrementLiveBytes(poly_cache->address(),
+                                    PolymorphicCodeCache::kSize);
+  }
+
+  Object* context = heap_->global_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    HeapObject* cache = HeapObject::cast(
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
+    if (!cache->IsUndefined()) {
+      MarkBit mark_bit = Marking::MarkBitFrom(cache);
+      if (Marking::IsGrey(mark_bit)) {
+        Marking::GreyToBlack(mark_bit);
+        MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
+      }
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
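+// Editorial note: Hurry() may blacken objects eagerly because the mutator is
+// stopped while it runs, so nothing can store a pointer to an unmarked object
+// into an already-black object behind the marker's back.  During ordinary
+// incremental steps that case is handled by the record-write barrier instead
+// (see BlackToGreyAndUnshift/RecordWriteSlow in the header below, which push
+// a written-into black object back onto the deque for rescanning).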
+
+void IncrementalMarking::Abort() {
+  if (IsStopped()) return;
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Aborting.\n");
+  }
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  if (IsMarking()) {
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            RecordWriteStub::STORE_BUFFER_ONLY);
+    DeactivateIncrementalWriteBarrier();
+
+    if (is_compacting_) {
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+  state_ = STOPPED;
+  is_compacting_ = false;
+}
+
+
+void IncrementalMarking::Finalize() {
+  Hurry();
+  state_ = STOPPED;
+  is_compacting_ = false;
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          RecordWriteStub::STORE_BUFFER_ONLY);
+  DeactivateIncrementalWriteBarrier();
+  ASSERT(marking_deque_.IsEmpty());
+  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+}
+
+
+void IncrementalMarking::MarkingComplete() {
+  state_ = COMPLETE;
+  // We will set the stack guard to request a GC now.  This will mean the rest
+  // of the GC gets performed as soon as possible (we can't do a GC here in a
+  // record-write context).  If a few things get allocated between now and
+  // then, that should not cause us to do a scavenge and keep marking
+  // incrementally, so we set the should-hurry flag to indicate that there
+  // can't be much work left to do.
+  set_should_hurry(true);
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Complete (normal).\n");
+  }
+  heap_->isolate()->stack_guard()->RequestGC();
+}
+
+
+void IncrementalMarking::Step(intptr_t allocated_bytes) {
+  if (heap_->gc_state() != Heap::NOT_IN_GC ||
+      !FLAG_incremental_marking ||
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
+    return;
+  }
+
+  allocated_ += allocated_bytes;
+
+  if (allocated_ < kAllocatedThreshold) return;
+
+  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+
+  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+  bytes_scanned_ += bytes_to_process;
+
+  double start = 0;
+
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+    start = OS::TimeCurrentMillis();
+  }
+
+  if (state_ == SWEEPING) {
+    if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
+        heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
+      bytes_scanned_ = 0;
+      StartMarking(PREVENT_COMPACTION);
+    }
+  } else if (state_ == MARKING) {
+    Map* filler_map = heap_->one_pointer_filler_map();
+    Map* global_context_map = heap_->global_context_map();
+    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one-word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) continue;
+
+      int size = obj->SizeFromMap(map);
+      bytes_to_process -= size;
+      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+      if (Marking::IsWhite(map_mark_bit)) {
+        WhiteToGreyAndPush(map, map_mark_bit);
+      }
+
+      // TODO(gc) switch to static visitor instead of normal visitor.
+      if (map == global_context_map) {
+        // Global contexts have weak fields.
+        Context* ctx = Context::cast(obj);
+
+        // We will mark the cache black with a separate pass
+        // when we finish marking.
+        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
+
+        VisitGlobalContext(ctx, &marking_visitor);
+      } else {
+        obj->IterateBody(map->instance_type(), size, &marking_visitor);
+      }
+
+      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+      Marking::MarkBlack(obj_mark_bit);
+      MemoryChunk::IncrementLiveBytes(obj->address(), size);
+    }
+    if (marking_deque_.IsEmpty()) MarkingComplete();
+  }
+
+  allocated_ = 0;
+
+  steps_count_++;
+  steps_count_since_last_gc_++;
+
+  bool speed_up = false;
+
+  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking after %d steps\n",
+             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+          old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (allocation_marking_factor_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking because of heap size increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking = heap_->PromotedTotalSize()
+      - old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = allocation_marking_factor_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice as fast as we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_gc) {
+      PrintF("Speed up marking because marker was not keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_gc) {
+        PrintF("Postponing speeding up marking until marking starts\n");
+      }
+    } else {
+      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
+      allocation_marking_factor_ = static_cast<int>(
+          Min(kMaxAllocationMarkingFactor,
+              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+      if (FLAG_trace_gc) {
+        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+      }
+    }
+  }
+
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+    double end = OS::TimeCurrentMillis();
+    double delta = (end - start);
+    longest_step_ = Max(longest_step_, delta);
+    steps_took_ += delta;
+    steps_took_since_last_gc_ += delta;
+  }
+}
+
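+// Worked example (editorial note, using the constants declared in
+// incremental-marking.h): a step only runs once kAllocatedThreshold (64 KB)
+// of allocation has accumulated, and then processes
+//   bytes_to_process = allocated_ * allocation_marking_factor_.
+// Each speed-up above turns a factor f into
+//   min(kMaxAllocationMarkingFactor, (int)((f + 2) * 1.3)),
+// so starting from kInitialAllocationMarkingFactor = 1 it ramps
+// 1 -> 3 -> 6 -> 10 -> 15 -> 22 -> ... up to the cap of 1000.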
+
+void IncrementalMarking::ResetStepCounters() {
+  steps_count_ = 0;
+  steps_took_ = 0;
+  longest_step_ = 0.0;
+  old_generation_space_available_at_start_of_incremental_ =
+      SpaceLeftInOldSpace();
+  old_generation_space_used_at_start_of_incremental_ =
+      heap_->PromotedTotalSize();
+  steps_count_since_last_gc_ = 0;
+  steps_took_since_last_gc_ = 0;
+  bytes_rescanned_ = 0;
+  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+  bytes_scanned_ = 0;
+}
+
+
+int64_t IncrementalMarking::SpaceLeftInOldSpace() {
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+}
+
+} }  // namespace v8::internal
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
new file mode 100644
index 0000000..b9d83ee
--- /dev/null
+++ b/src/incremental-marking.h
@@ -0,0 +1,275 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_H_
+#define V8_INCREMENTAL_MARKING_H_
+
+
+#include "execution.h"
+#include "mark-compact.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class IncrementalMarking {
+ public:
+  enum State {
+    STOPPED,
+    SWEEPING,
+    MARKING,
+    COMPLETE
+  };
+
+  explicit IncrementalMarking(Heap* heap);
+
+  void TearDown();
+
+  State state() {
+    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
+    return state_;
+  }
+
+  bool should_hurry() { return should_hurry_; }
+  void set_should_hurry(bool val) { should_hurry_ = val; }
+
+  inline bool IsStopped() { return state() == STOPPED; }
+
+  INLINE(bool IsMarking()) { return state() >= MARKING; }
+
+  inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+  bool WorthActivating();
+
+  void Start();
+
+  void Stop();
+
+  void PrepareForScavenge();
+
+  void UpdateMarkingDequeAfterScavenge();
+
+  void Hurry();
+
+  void Finalize();
+
+  void Abort();
+
+  void MarkingComplete();
+
+  // It's hard to know how much work the incremental marker should do to make
+  // progress in the face of the mutator creating new work for it.  We start
+  // off at a moderate rate of work and gradually increase the speed of the
+  // incremental marker until it completes.
+  // Do some marking every time this much memory has been allocated.
+  static const intptr_t kAllocatedThreshold = 65536;
+  // Start off by marking this many times more memory than has been allocated.
+  static const intptr_t kInitialAllocationMarkingFactor = 1;
+  // But if we are promoting a lot of data we need to mark faster to keep up
+  // with the data that is entering the old space through promotion.
+  static const intptr_t kFastMarking = 3;
+  // After this many steps we increase the marking/allocating factor.
+  static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+  // This is how much we increase the marking/allocating factor by.
+  static const intptr_t kAllocationMarkingFactorSpeedup = 2;
+  static const intptr_t kMaxAllocationMarkingFactor = 1000;
+
+  void OldSpaceStep(intptr_t allocated) {
+    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+  }
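+  // Editorial note: with the defaults above, old-space allocation is
+  // weighted by kFastMarking / kInitialAllocationMarkingFactor = 3, so
+  // e.g. OldSpaceStep(1000) credits 3000 bytes towards kAllocatedThreshold;
+  // promoted data fills the old generation faster, so it has to be marked
+  // at a higher rate.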
+  void Step(intptr_t allocated);
+
+  inline void RestartIfNotMarking() {
+    if (state_ == COMPLETE) {
+      state_ = MARKING;
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
+      }
+    }
+  }
+
+  static void RecordWriteFromCode(HeapObject* obj,
+                                  Object* value,
+                                  Isolate* isolate);
+
+  static void RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                               Object** slot,
+                                               Isolate* isolate);
+
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWriteIntoCode(HeapObject* obj,
+                                  RelocInfo* rinfo,
+                                  Object* value));
+  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
+                                     Object** slot,
+                                     Code* value));
+
+
+  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+  void RecordWriteIntoCodeSlow(HeapObject* obj,
+                               RelocInfo* rinfo,
+                               Object* value);
+  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
+  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
+  void RecordCodeTargetPatch(Address pc, HeapObject* value);
+
+  inline void RecordWrites(HeapObject* obj);
+
+  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
+  // Does white->black, or keeps the existing grey or black color. Returns
+  // true if converting white to black.
+  inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
+    ASSERT(!Marking::IsImpossible(mark_bit));
+    if (mark_bit.Get()) {
+      // Grey or black: Keep the color.
+      return false;
+    }
+    mark_bit.Set();
+    ASSERT(Marking::IsBlack(mark_bit));
+    return true;
+  }
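+  // Editorial note on the bit trick above, assuming the mark-bit encoding
+  // used by the Marking helpers (white = 00, black = 10, grey = 11, and 01
+  // is impossible): testing the first bit separates white from
+  // grey-or-black, and setting it turns exactly the white pattern 00 into
+  // the black pattern 10, which the final ASSERT checks.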
+
+  inline int steps_count() {
+    return steps_count_;
+  }
+
+  inline double steps_took() {
+    return steps_took_;
+  }
+
+  inline double longest_step() {
+    return longest_step_;
+  }
+
+  inline int steps_count_since_last_gc() {
+    return steps_count_since_last_gc_;
+  }
+
+  inline double steps_took_since_last_gc() {
+    return steps_took_since_last_gc_;
+  }
+
+  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
+  }
+
+  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+    SetNewSpacePageFlags(chunk, IsMarking());
+  }
+
+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  bool IsCompacting() { return IsMarking() && is_compacting_; }
+
+  void ActivateGeneratedStub(Code* stub);
+
+  void NotifyOfHighPromotionRate() {
+    if (IsMarking()) {
+      if (allocation_marking_factor_ < kFastMarking) {
+        if (FLAG_trace_gc) {
+          PrintF("Increasing marking speed to %d due to high promotion rate\n",
+                 static_cast<int>(kFastMarking));
+        }
+        allocation_marking_factor_ = kFastMarking;
+      }
+    }
+  }
+
+  void EnterNoMarkingScope() {
+    no_marking_scope_depth_++;
+  }
+
+  void LeaveNoMarkingScope() {
+    no_marking_scope_depth_--;
+  }
+
+  void UncommitMarkingDeque();
+
+ private:
+  int64_t SpaceLeftInOldSpace();
+
+  void ResetStepCounters();
+
+  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+  void StartMarking(CompactionFlag flag);
+
+  void ActivateIncrementalWriteBarrier(PagedSpace* space);
+  static void ActivateIncrementalWriteBarrier(NewSpace* space);
+  void ActivateIncrementalWriteBarrier();
+
+  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+  void DeactivateIncrementalWriteBarrier();
+
+  static void SetOldSpacePageFlags(MemoryChunk* chunk,
+                                   bool is_marking,
+                                   bool is_compacting);
+
+  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+
+  void EnsureMarkingDequeIsCommitted();
+
+  void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
+
+  Heap* heap_;
+
+  State state_;
+  bool is_compacting_;
+
+  VirtualMemory* marking_deque_memory_;
+  bool marking_deque_memory_committed_;
+  MarkingDeque marking_deque_;
+
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int64_t old_generation_space_available_at_start_of_incremental_;
+  int64_t old_generation_space_used_at_start_of_incremental_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+  int64_t bytes_rescanned_;
+  bool should_hurry_;
+  int allocation_marking_factor_;
+  intptr_t bytes_scanned_;
+  intptr_t allocated_;
+
+  int no_marking_scope_depth_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_INCREMENTAL_MARKING_H_
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index 796a447..b337e88 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -33,9 +33,9 @@
 #include "utils.h"
 #include "ast.h"
 #include "bytecodes-irregexp.h"
+#include "jsregexp.h"
 #include "interpreter-irregexp.h"
 
-
 namespace v8 {
 namespace internal {
 
@@ -187,12 +187,12 @@
 
 
 template <typename Char>
-static bool RawMatch(Isolate* isolate,
-                     const byte* code_base,
-                     Vector<const Char> subject,
-                     int* registers,
-                     int current,
-                     uint32_t current_char) {
+static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
+                                           const byte* code_base,
+                                           Vector<const Char> subject,
+                                           int* registers,
+                                           int current,
+                                           uint32_t current_char) {
   const byte* pc = code_base;
   // BacktrackStack ensures that the memory allocated for the backtracking stack
   // is returned to the system or cached if there is no stack being cached at
@@ -211,24 +211,24 @@
     switch (insn & BYTECODE_MASK) {
       BYTECODE(BREAK)
         UNREACHABLE();
-        return false;
+        return RegExpImpl::RE_FAILURE;
       BYTECODE(PUSH_CP)
         if (--backtrack_stack_space < 0) {
-          return false;  // No match on backtrack stack overflow.
+          return RegExpImpl::RE_EXCEPTION;
         }
         *backtrack_sp++ = current;
         pc += BC_PUSH_CP_LENGTH;
         break;
       BYTECODE(PUSH_BT)
         if (--backtrack_stack_space < 0) {
-          return false;  // No match on backtrack stack overflow.
+          return RegExpImpl::RE_EXCEPTION;
         }
         *backtrack_sp++ = Load32Aligned(pc + 4);
         pc += BC_PUSH_BT_LENGTH;
         break;
       BYTECODE(PUSH_REGISTER)
         if (--backtrack_stack_space < 0) {
-          return false;  // No match on backtrack stack overflow.
+          return RegExpImpl::RE_EXCEPTION;
         }
         *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
         pc += BC_PUSH_REGISTER_LENGTH;
@@ -278,9 +278,9 @@
         pc += BC_POP_REGISTER_LENGTH;
         break;
       BYTECODE(FAIL)
-        return false;
+        return RegExpImpl::RE_FAILURE;
       BYTECODE(SUCCEED)
-        return true;
+        return RegExpImpl::RE_SUCCESS;
       BYTECODE(ADVANCE_CP)
         current += insn >> BYTECODE_SHIFT;
         pc += BC_ADVANCE_CP_LENGTH;
@@ -625,11 +625,12 @@
 }
 
 
-bool IrregexpInterpreter::Match(Isolate* isolate,
-                                Handle<ByteArray> code_array,
-                                Handle<String> subject,
-                                int* registers,
-                                int start_position) {
+RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
+    Isolate* isolate,
+    Handle<ByteArray> code_array,
+    Handle<String> subject,
+    int* registers,
+    int start_position) {
   ASSERT(subject->IsFlat());
 
   AssertNoAllocation a;
diff --git a/src/interpreter-irregexp.h b/src/interpreter-irregexp.h
index 076f0c5..0f45d98 100644
--- a/src/interpreter-irregexp.h
+++ b/src/interpreter-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,11 +36,11 @@
 
 class IrregexpInterpreter {
  public:
-  static bool Match(Isolate* isolate,
-                    Handle<ByteArray> code,
-                    Handle<String> subject,
-                    int* captures,
-                    int start_position);
+  static RegExpImpl::IrregexpResult Match(Isolate* isolate,
+                                          Handle<ByteArray> code,
+                                          Handle<String> subject,
+                                          int* captures,
+                                          int start_position);
 };
 
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index aa6b537..0a2c174 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -36,6 +36,19 @@
 namespace internal {
 
 
+SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+  if (isolate->context() != NULL) {
+    context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+    dummy_ = Handle<Context>(isolate->context());
+#endif
+  }
+  isolate->set_save_context(this);
+
+  c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
+}
+
+
 bool Isolate::DebuggerHasBreakPoints() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   return debug()->has_break_points();
diff --git a/src/isolate.cc b/src/isolate.cc
index fd0f673..c235a23 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -98,6 +98,15 @@
   failed_access_check_callback_ = NULL;
   save_context_ = NULL;
   catcher_ = NULL;
+  top_lookup_result_ = NULL;
+
+  // These members are re-initialized later after deserialization
+  // is complete.
+  pending_exception_ = NULL;
+  has_pending_message_ = false;
+  pending_message_obj_ = NULL;
+  pending_message_script_ = NULL;
+  scheduled_exception_ = NULL;
 }
 
 
@@ -472,6 +481,9 @@
   for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
     it.frame()->Iterate(v);
   }
+
+  // Iterate pointers in live lookup results.
+  thread->top_lookup_result_->Iterate(v);
 }
 
 
@@ -1060,6 +1072,16 @@
       message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
           location, HandleVector<Object>(&exception_handle, 1), stack_trace,
           stack_trace_object);
+    } else if (location != NULL && !location->script().is_null()) {
+      // We are bootstrapping and caught an error where the location is set
+      // and we have a script for the location.
+      // In this case we could have an extension (or an internal error
+      // somewhere) and we print out the line number at which the error occured
+      // to the console for easier debugging.
+      int line_number = GetScriptLineNumberSafe(location->script(),
+                                                location->start_pos());
+      OS::PrintError("Extension or internal compilation error at line %d.\n",
+                     line_number);
     }
   }
 
@@ -1284,6 +1306,9 @@
   memcpy(to, reinterpret_cast<char*>(thread_local_top()),
          sizeof(ThreadLocalTop));
   InitializeThreadLocal();
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
   return to + sizeof(ThreadLocalTop);
 }
 
@@ -1403,11 +1428,13 @@
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
-      pc_to_code_cache_(NULL),
+      inner_pointer_to_code_cache_(NULL),
       write_input_buffer_(NULL),
       global_handles_(NULL),
       context_switcher_(NULL),
       thread_manager_(NULL),
+      fp_stubs_generated_(false),
+      has_installed_extensions_(false),
       string_tracker_(NULL),
       regexp_stack_(NULL),
       embedder_data_(NULL) {
@@ -1575,8 +1602,8 @@
   compilation_cache_ = NULL;
   delete bootstrapper_;
   bootstrapper_ = NULL;
-  delete pc_to_code_cache_;
-  pc_to_code_cache_ = NULL;
+  delete inner_pointer_to_code_cache_;
+  inner_pointer_to_code_cache_ = NULL;
   delete write_input_buffer_;
   write_input_buffer_ = NULL;
 
@@ -1610,9 +1637,6 @@
 void Isolate::InitializeThreadLocal() {
   thread_local_top_.isolate_ = this;
   thread_local_top_.Initialize();
-  clear_pending_exception();
-  clear_pending_message();
-  clear_scheduled_exception();
 }
 
 
@@ -1700,7 +1724,7 @@
   context_slot_cache_ = new ContextSlotCache();
   descriptor_lookup_cache_ = new DescriptorLookupCache();
   unicode_cache_ = new UnicodeCache();
-  pc_to_code_cache_ = new PcToCodeCache(this);
+  inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
@@ -1767,9 +1791,14 @@
   // If we are deserializing, read the state into the now-empty heap.
   if (des != NULL) {
     des->Deserialize();
-    stub_cache_->Clear();
+    stub_cache_->Initialize(true);
   }
 
+  // Finish initialization of ThreadLocal after deserialization is done.
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
+
   // Deserializing may put strange things in the root array's copy of the
   // stack guard.
   heap_.SetStackLimits();
diff --git a/src/isolate.h b/src/isolate.h
index 2582da6..2ea9b80 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -66,7 +66,7 @@
 class HeapProfiler;
 class InlineRuntimeFunctionsTable;
 class NoAllocationStringAllocator;
-class PcToCodeCache;
+class InnerPointerToCodeCache;
 class PreallocatedMemoryThread;
 class RegExpStack;
 class SaveContext;
@@ -255,6 +255,9 @@
   // Call back function to report unsafe JS accesses.
   v8::FailedAccessCheckCallback failed_access_check_callback_;
 
+  // Head of the list of live LookupResults.
+  LookupResult* top_lookup_result_;
+
   // Whether out of memory exceptions should be ignored.
   bool ignore_out_of_memory_;
 
@@ -311,7 +314,6 @@
   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
   V(int, suffix_table, (kBMMaxShift + 1))                                      \
-  V(uint32_t, random_seed, 2)                                                  \
   V(uint32_t, private_random_seed, 2)                                          \
   ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
 
@@ -841,7 +843,9 @@
     return unicode_cache_;
   }
 
-  PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
+  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
+    return inner_pointer_to_code_cache_;
+  }
 
   StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
 
@@ -879,12 +883,24 @@
 
   RuntimeState* runtime_state() { return &runtime_state_; }
 
+  void set_fp_stubs_generated(bool value) {
+    fp_stubs_generated_ = value;
+  }
+
+  bool fp_stubs_generated() { return fp_stubs_generated_; }
+
   StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
     return &compiler_safe_string_input_buffer_;
   }
 
   Builtins* builtins() { return &builtins_; }
 
+  void NotifyExtensionInstalled() {
+    has_installed_extensions_ = true;
+  }
+
+  bool has_installed_extensions() { return has_installed_extensions_; }
+
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
       regexp_macro_assembler_canonicalize() {
     return &regexp_macro_assembler_canonicalize_;
@@ -987,6 +1003,13 @@
   void SetData(void* data) { embedder_data_ = data; }
   void* GetData() { return embedder_data_; }
 
+  LookupResult* top_lookup_result() {
+    return thread_local_top_.top_lookup_result_;
+  }
+  void SetTopLookupResult(LookupResult* top) {
+    thread_local_top_.top_lookup_result_ = top;
+  }
+
  private:
   Isolate();
 
@@ -1130,14 +1153,16 @@
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
-  PcToCodeCache* pc_to_code_cache_;
+  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
   StringInputBuffer* write_input_buffer_;
   GlobalHandles* global_handles_;
   ContextSwitcher* context_switcher_;
   ThreadManager* thread_manager_;
   RuntimeState runtime_state_;
+  bool fp_stubs_generated_;
   StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
   Builtins builtins_;
+  bool has_installed_extensions_;
   StringTracker* string_tracker_;
   unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
   unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
@@ -1210,19 +1235,7 @@
 // versions of GCC. See V8 issue 122 for details.
 class SaveContext BASE_EMBEDDED {
  public:
-  explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
-    if (isolate->context() != NULL) {
-      context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
-      dummy_ = Handle<Context>(isolate->context());
-#endif
-    }
-    isolate->set_save_context(this);
-
-    // If there is no JS frame under the current C frame, use the value 0.
-    JavaScriptFrameIterator it(isolate);
-    js_sp_ = it.done() ? 0 : it.frame()->sp();
-  }
+  inline explicit SaveContext(Isolate* isolate);
 
   ~SaveContext() {
     if (context_.is_null()) {
@@ -1240,8 +1253,8 @@
   SaveContext* prev() { return prev_; }
 
   // Returns true if this save context is below a given JavaScript frame.
-  bool below(JavaScriptFrame* frame) {
-    return (js_sp_ == 0) || (frame->sp() < js_sp_);
+  bool IsBelowFrame(JavaScriptFrame* frame) {
+    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
   }
 
  private:
@@ -1250,7 +1263,7 @@
   Handle<Context> dummy_;
 #endif
   SaveContext* prev_;
-  Address js_sp_;  // The top JS frame's sp when saving context.
+  Address c_entry_fp_;
 };
 
 
diff --git a/src/json-parser.h b/src/json-parser.h
index 68eab65..ca796a6 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -165,7 +165,7 @@
 
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
-  isolate_ = source->map()->isolate();
+  isolate_ = source->map()->GetHeap()->isolate();
   FlattenString(source);
   source_ = source;
   source_length_ = source_->length();
diff --git a/src/json.js b/src/json.js
index deba126..ccef445 100644
--- a/src/json.js
+++ b/src/json.js
@@ -345,4 +345,4 @@
   ));
 }
 
-SetUpJSON()
+SetUpJSON();
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 3ebfbdf..18ff257 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -68,9 +68,9 @@
                                                Handle<String> flags,
                                                bool* has_pending_exception) {
   // Call the construct code with 2 arguments.
-  Object** argv[2] = { Handle<Object>::cast(pattern).location(),
-                       Handle<Object>::cast(flags).location() };
-  return Execution::New(constructor, 2, argv, has_pending_exception);
+  Handle<Object> argv[] = { pattern, flags };
+  return Execution::New(constructor, ARRAY_SIZE(argv), argv,
+                        has_pending_exception);
 }
 
 
@@ -509,14 +509,16 @@
   }
   Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
 
-  if (IrregexpInterpreter::Match(isolate,
-                                 byte_codes,
-                                 subject,
-                                 register_vector,
-                                 index)) {
-    return RE_SUCCESS;
+  IrregexpResult result = IrregexpInterpreter::Match(isolate,
+                                                     byte_codes,
+                                                     subject,
+                                                     register_vector,
+                                                     index);
+  if (result == RE_EXCEPTION) {
+    ASSERT(!isolate->has_pending_exception());
+    isolate->StackOverflow();
   }
-  return RE_FAILURE;
+  return result;
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -4723,7 +4725,6 @@
 
 
 const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-const DispatchTable::Entry DispatchTable::Config::kNoValue;
 
 
 void DispatchTable::AddRange(CharacterRange full_range, int value) {
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 54297a4..df110d1 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_JSREGEXP_H_
 
 #include "allocation.h"
+#include "assembler.h"
 #include "zone-inl.h"
 
 namespace v8 {
@@ -388,7 +389,7 @@
     typedef uc16 Key;
     typedef Entry Value;
     static const uc16 kNoKey;
-    static const Entry kNoValue;
+    static const Entry NoValue() { return Value(); }
     static inline int Compare(uc16 a, uc16 b) {
       if (a == b)
         return 0;
diff --git a/src/list-inl.h b/src/list-inl.h
index 80bccc9..e2c358c 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -216,11 +216,11 @@
     int mid = (low + high) / 2;
     T mid_elem = list[mid];
 
-    if (mid_elem > elem) {
+    if (cmp(&mid_elem, &elem) > 0) {
       high = mid - 1;
       continue;
     }
-    if (mid_elem < elem) {
+    if (cmp(&mid_elem, &elem) < 0) {
       low = mid + 1;
       continue;
     }
@@ -236,6 +236,7 @@
   return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
 }
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_LIST_INL_H_
diff --git a/src/list.h b/src/list.h
index 0558709..57504e0 100644
--- a/src/list.h
+++ b/src/list.h
@@ -165,8 +165,11 @@
 
 class Map;
 class Code;
+template<typename T> class Handle;
 typedef List<Map*> MapList;
 typedef List<Code*> CodeList;
+typedef List<Handle<Map> > MapHandleList;
+typedef List<Handle<Code> > CodeHandleList;
 
 // Perform binary search for an element in an already sorted
 // list. Returns the index of the element or -1 if it was not found.
@@ -176,6 +179,7 @@
 template <typename T>
 int SortedListBSearch(const List<T>& list, T elem);
 
+
 } }  // namespace v8::internal
 
 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 4661106..c4d8b1e 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -152,8 +152,8 @@
 LiveRange::LiveRange(int id)
     : id_(id),
       spilled_(false),
+      is_double_(false),
       assigned_register_(kInvalidAssignment),
-      assigned_register_kind_(NONE),
       last_interval_(NULL),
       first_interval_(NULL),
       first_pos_(NULL),
@@ -169,7 +169,7 @@
 void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
   ASSERT(!HasRegisterAssigned() && !IsSpilled());
   assigned_register_ = reg;
-  assigned_register_kind_ = register_kind;
+  is_double_ = (register_kind == DOUBLE_REGISTERS);
   ConvertOperands();
 }
 
@@ -234,7 +234,8 @@
   // at the current or the immediate next position.
   UsePosition* use_pos = NextRegisterPosition(pos);
   if (use_pos == NULL) return true;
-  return use_pos->pos().Value() > pos.NextInstruction().Value();
+  return
+      use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
 }
 
 
@@ -555,7 +556,7 @@
       reusable_slots_(8),
       next_virtual_register_(num_values),
       first_artificial_register_(num_values),
-      mode_(NONE),
+      mode_(GENERAL_REGISTERS),
       num_registers_(-1),
       graph_(graph),
       has_osr_entry_(false) {}
@@ -1043,11 +1044,13 @@
       // it into a location different from the operand of a live range
       // covering a branch instruction.
       // Thus we need to manually record a pointer.
-      if (phi->representation().IsTagged()) {
-        LInstruction* branch =
-            InstructionAt(cur_block->last_instruction_index());
-        if (branch->HasPointerMap()) {
+      LInstruction* branch =
+          InstructionAt(cur_block->last_instruction_index());
+      if (branch->HasPointerMap()) {
+        if (phi->representation().IsTagged()) {
           branch->pointer_map()->RecordPointer(phi_operand);
+        } else if (!phi->representation().IsDouble()) {
+          branch->pointer_map()->RecordUntagged(phi_operand);
         }
       }
     }
@@ -1142,10 +1145,13 @@
         // it into a location different from the operand of a live range
         // covering a branch instruction.
         // Thus we need to manually record a pointer.
-        if (HasTaggedValue(range->id())) {
-          LInstruction* branch = InstructionAt(pred->last_instruction_index());
-          if (branch->HasPointerMap()) {
+        LInstruction* branch = InstructionAt(pred->last_instruction_index());
+        if (branch->HasPointerMap()) {
+          if (HasTaggedValue(range->id())) {
             branch->pointer_map()->RecordPointer(cur_op);
+          } else if (!cur_op->IsDoubleStackSlot() &&
+                     !cur_op->IsDoubleRegister()) {
+            branch->pointer_map()->RemovePointer(cur_op);
           }
         }
       }
@@ -1461,7 +1467,6 @@
 void LAllocator::AllocateGeneralRegisters() {
   HPhase phase("Allocate general registers", this);
   num_registers_ = Register::kNumAllocatableRegisters;
-  mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }
 
@@ -1475,7 +1480,6 @@
 
 
 void LAllocator::AllocateRegisters() {
-  ASSERT(mode_ != NONE);
   ASSERT(unhandled_live_ranges_.is_empty());
 
   for (int i = 0; i < live_ranges_.length(); ++i) {
@@ -1580,7 +1584,6 @@
 
 
 const char* LAllocator::RegisterName(int allocation_index) {
-  ASSERT(mode_ != NONE);
   if (mode_ == GENERAL_REGISTERS) {
     return Register::AllocationIndexToString(allocation_index);
   } else {
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index e4e6497..610beef 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -146,7 +146,6 @@
 
 
 enum RegisterKind {
-  NONE,
   GENERAL_REGISTERS,
   DOUBLE_REGISTERS
 };
@@ -319,7 +318,7 @@
   // live range to the result live range.
   void SplitAt(LifetimePosition position, LiveRange* result);
 
-  bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
+  bool IsDouble() const { return is_double_; }
   bool HasRegisterAssigned() const {
     return assigned_register_ != kInvalidAssignment;
   }
@@ -377,8 +376,8 @@
 
   int id_;
   bool spilled_;
+  bool is_double_;
   int assigned_register_;
-  RegisterKind assigned_register_kind_;
   UseInterval* last_interval_;
   UseInterval* first_interval_;
   UsePosition* first_pos_;
diff --git a/src/lithium.cc b/src/lithium.cc
index 5410f6f..31b1698 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -156,6 +156,27 @@
 }
 
 
+void LPointerMap::RemovePointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void LPointerMap::RecordUntagged(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op);
+}
+
+
 void LPointerMap::PrintTo(StringStream* stream) {
   stream->Add("{");
   for (int i = 0; i < pointer_operands_.length(); ++i) {
@@ -182,6 +203,7 @@
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
       return 3;
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
diff --git a/src/lithium.h b/src/lithium.h
index a933f72..b605eb9 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -407,9 +407,18 @@
 class LPointerMap: public ZoneObject {
  public:
   explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+      : pointer_operands_(8),
+        untagged_operands_(0),
+        position_(position),
+        lithium_position_(-1) { }
 
-  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  const ZoneList<LOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
   int position() const { return position_; }
   int lithium_position() const { return lithium_position_; }
 
@@ -419,10 +428,13 @@
   }
 
   void RecordPointer(LOperand* op);
+  void RemovePointer(LOperand* op);
+  void RecordUntagged(LOperand* op);
   void PrintTo(StringStream* stream);
 
  private:
   ZoneList<LOperand*> pointer_operands_;
+  ZoneList<LOperand*> untagged_operands_;
   int position_;
   int lithium_position_;
 };
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index e05c53c..c94a3ee 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -325,9 +325,10 @@
             if (old_node.children[i].live_shared_function_infos) {
               old_node.children[i].live_shared_function_infos.
                   forEach(function (old_child_info) {
-                    %LiveEditReplaceRefToNestedFunction(old_info.info,
-                                                        corresponding_child_info,
-                                                        old_child_info.info);
+                    %LiveEditReplaceRefToNestedFunction(
+                        old_info.info,
+                        corresponding_child_info,
+                        old_child_info.info);
                   });
             }
           }
@@ -381,7 +382,7 @@
           position: break_point_position,
           line: break_point.line(),
           column: break_point.column()
-      }
+      };
       break_point_old_positions.push(old_position_description);
     }
 
@@ -418,7 +419,7 @@
             position: updated_position,
             line: new_location.line,
             column: new_location.column
-        }
+        };
 
         break_point.set(original_script);
 
@@ -428,7 +429,7 @@
           new_positions: new_position_description
           } );
       }
-    }
+    };
   }
 
 
@@ -465,7 +466,7 @@
   }
   PosTranslator.prototype.GetChunks = function() {
     return this.chunks;
-  }
+  };
 
   PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
     var array = this.chunks;
@@ -492,18 +493,18 @@
       inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
     }
     return inside_chunk_handler(pos, chunk);
-  }
+  };
 
   PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
     Assert(false, "Cannot translate position in changed area");
-  }
+  };
 
   PosTranslator.ShiftWithTopInsideChunkHandler =
       function(pos, diff_chunk) {
     // We carelessly do not check whether we stay inside the chunk after
     // translation.
     return pos - diff_chunk.pos1 + diff_chunk.pos2;
-  }
+  };
 
   var FunctionStatus = {
       // No change to function or its inner functions; however its positions
@@ -517,7 +518,7 @@
       CHANGED: "changed",
       // Function is changed but cannot be patched.
       DAMAGED: "damaged"
-  }
+  };
 
   function CodeInfoTreeNode(code_info, children, array_index) {
     this.info = code_info;
@@ -585,14 +586,14 @@
     var chunk_it = new function() {
       var chunk_index = 0;
       var pos_diff = 0;
-      this.current = function() { return chunks[chunk_index]; }
+      this.current = function() { return chunks[chunk_index]; };
       this.next = function() {
         var chunk = chunks[chunk_index];
         pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
         chunk_index++;
-      }
-      this.done = function() { return chunk_index >= chunks.length; }
-      this.TranslatePos = function(pos) { return pos + pos_diff; }
+      };
+      this.done = function() { return chunk_index >= chunks.length; };
+      this.TranslatePos = function(pos) { return pos + pos_diff; };
     };
 
     // A recursive function that processes internals of a function and all its
@@ -946,16 +947,16 @@
       BLOCKED_ON_OTHER_STACK: 3,
       BLOCKED_UNDER_NATIVE_CODE: 4,
       REPLACED_ON_ACTIVE_STACK: 5
-  }
+  };
 
   FunctionPatchabilityStatus.SymbolName = function(code) {
-    var enum = FunctionPatchabilityStatus;
-    for (name in enum) {
-      if (enum[name] == code) {
+    var enumeration = FunctionPatchabilityStatus;
+    for (name in enumeration) {
+      if (enumeration[name] == code) {
         return name;
       }
     }
-  }
+  };
 
 
   // A logical failure in liveedit process. This means that change_log
@@ -968,7 +969,7 @@
 
   Failure.prototype.toString = function() {
     return "LiveEdit Failure: " + this.message;
-  }
+  };
 
   // A testing entry.
   function GetPcFromSourcePos(func, source_pos) {
@@ -1078,5 +1079,5 @@
     PosTranslator: PosTranslator,
     CompareStrings: CompareStrings,
     ApplySingleChunkPatch: ApplySingleChunkPatch
-  }
-}
+  };
+};
diff --git a/src/liveedit.cc b/src/liveedit.cc
index d44c2fc..eb183da 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -602,7 +602,8 @@
   // Build AST.
   CompilationInfo info(script);
   info.MarkAsGlobal();
-  if (ParserApi::Parse(&info)) {
+  // Parse and don't allow skipping lazy functions.
+  if (ParserApi::Parse(&info, kNoParsingFlags)) {
     // Compile the code.
     LiveEditFunctionTracker tracker(info.isolate(), info.function());
     if (Compiler::MakeCodeForLiveEdit(&info)) {
@@ -797,7 +798,7 @@
     HandleScope scope;
     FunctionInfoWrapper info = FunctionInfoWrapper::Create();
     info.SetInitialProperties(fun->name(), fun->start_position(),
-                              fun->end_position(), fun->num_parameters(),
+                              fun->end_position(), fun->parameter_count(),
                               current_parent_index_);
     current_parent_index_ = len_;
     SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -855,38 +856,20 @@
       return HEAP->undefined_value();
     }
     do {
-      ZoneList<Variable*> list(10);
-      outer_scope->CollectUsedVariables(&list);
-      int j = 0;
-      for (int i = 0; i < list.length(); i++) {
-        Variable* var1 = list[i];
-        if (var1->IsContextSlot()) {
-          if (j != i) {
-            list[j] = var1;
-          }
-          j++;
-        }
-      }
+      ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
+      ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
+      outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+      context_list.Sort(&Variable::CompareIndex);
 
-      // Sort it.
-      for (int k = 1; k < j; k++) {
-        int l = k;
-        for (int m = k + 1; m < j; m++) {
-          if (list[l]->index() > list[m]->index()) {
-            l = m;
-          }
-        }
-        list[k] = list[l];
-      }
-      for (int i = 0; i < j; i++) {
+      for (int i = 0; i < context_list.length(); i++) {
         SetElementNonStrict(scope_info_list,
                             scope_info_length,
-                            list[i]->name());
+                            context_list[i]->name());
         scope_info_length++;
         SetElementNonStrict(
             scope_info_list,
             scope_info_length,
-            Handle<Smi>(Smi::FromInt(list[i]->index())));
+            Handle<Smi>(Smi::FromInt(context_list[i]->index())));
         scope_info_length++;
       }
       SetElementNonStrict(scope_info_list,
@@ -1000,6 +983,7 @@
 static void ReplaceCodeObject(Code* original, Code* substitution) {
   ASSERT(!HEAP->InNewSpace(substitution));
 
+  HeapIterator iterator;
   AssertNoAllocation no_allocations_please;
 
   // A zone scope for ReferenceCollectorVisitor.
@@ -1016,7 +1000,6 @@
 
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
-  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     obj->Iterate(&visitor);
   }
@@ -1101,12 +1084,14 @@
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
+  HEAP->EnsureHeapIsIterable();
+
   if (IsJSFunctionCode(shared_info->code())) {
     Handle<Code> code = compile_info_wrapper.GetFunctionCode();
     ReplaceCodeObject(shared_info->code(), *code);
     Handle<Object> code_scope_info =  compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
-      shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
+      shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
     }
   }
 
@@ -1271,7 +1256,8 @@
 
 // Patch positions in code (changes relocation info section) and possibly
 // returns new instance of code.
-static Handle<Code> PatchPositionsInCode(Handle<Code> code,
+static Handle<Code> PatchPositionsInCode(
+    Handle<Code> code,
     Handle<JSArray> position_change_array) {
 
   RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1286,7 +1272,7 @@
         int new_position = TranslatePosition(position,
                                              position_change_array);
         if (position != new_position) {
-          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
+          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
           buffer_writer.Write(&info_copy);
           continue;
         }
@@ -1333,6 +1319,8 @@
   info->set_end_position(new_function_end);
   info->set_function_token_position(new_function_token_pos);
 
+  HEAP->EnsureHeapIsIterable();
+
   if (IsJSFunctionCode(info->code())) {
     // Patch relocation info section of the code.
     Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 957c051..408e2a3 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -1085,7 +1085,7 @@
 static int CountHeapObjects() {
   int count = 0;
   // Iterate over all the heap spaces and count the number of objects.
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
   while ((heap_obj = iterator.next()) != NULL) {
     count++;
@@ -1122,7 +1122,7 @@
   // allocation, and we need to allocate below.
   {
     // Iterate over all the heap spaces and add the objects.
-    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+    HeapIterator iterator;
     HeapObject* heap_obj = NULL;
     bool failed = false;
     while (!failed && (heap_obj = iterator.next()) != NULL) {
@@ -1336,7 +1336,9 @@
   // Allocate the JSArray of the elements.
   Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
   if (elements->IsFailure()) return Object::cast(*elements);
-  Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+
+  maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Set body.elements.
   Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
@@ -1462,7 +1464,9 @@
   Handle<JSObject> summary_obj =
     factory->NewJSObject(isolate->array_function());
   if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-  Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+
+  maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Create the body object.
   Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
@@ -1589,7 +1593,9 @@
 
   // Return the result as a JS array.
   Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-  Handle<JSArray>::cast(lols)->SetContent(*list);
+
+  maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (result->IsFailure()) return Object::cast(*result);
@@ -2507,7 +2513,7 @@
   OS::Print("  Start verify ...\n");
   OS::Print("  Verifying ...");
   Flush();
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
   while ((heap_obj = iterator.next()) != NULL) {
     number_of_heap_objects++;
@@ -2613,7 +2619,7 @@
     HeapObject* heap_obj = it.Obj();
     if (heap->InFromSpace(heap_obj)) {
       OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
-                i++, heap_obj, heap->new_space()->FromSpaceLow());
+                i++, heap_obj, heap->new_space()->FromSpaceStart());
     }
   }
 }
diff --git a/src/log.cc b/src/log.cc
index 3d66b5f..eab2639 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1356,12 +1356,12 @@
 
 static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
                                       Handle<Code>* code_objects) {
+  HeapIterator iterator;
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
 
   // Iterate the heap to find shared function info objects and record
   // the unoptimized code for them.
-  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
@@ -1450,6 +1450,8 @@
   const char arch[] = "x64";
 #elif V8_TARGET_ARCH_ARM
   const char arch[] = "arm";
+#elif V8_TARGET_ARCH_MIPS
+  const char arch[] = "mips";
 #else
   const char arch[] = "unknown";
 #endif
@@ -1519,8 +1521,9 @@
 
 
 void Logger::LogCodeObjects() {
-  AssertNoAllocation no_alloc;
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   HeapIterator iterator;
+  AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsCode()) LogCodeObject(obj);
   }
@@ -1573,6 +1576,7 @@
 
 
 void Logger::LogCompiledFunctions() {
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1591,9 +1595,9 @@
 
 
 void Logger::LogAccessorCallbacks() {
-  AssertNoAllocation no_alloc;
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   HeapIterator iterator;
-  i::Isolate* isolate = ISOLATE;
+  AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsAccessorInfo()) continue;
     AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1601,11 +1605,11 @@
     String* name = String::cast(ai->name());
     Address getter_entry = v8::ToCData<Address>(ai->getter());
     if (getter_entry != 0) {
-      PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
+      PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
     }
     Address setter_entry = v8::ToCData<Address>(ai->setter());
     if (setter_entry != 0) {
-      PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
+      PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
     }
   }
 }
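Note on the logger changes above: because the heap is no longer always
iterable, each heap walk is preceded by a kMakeHeapIterableMask collection,
and the HeapIterator is constructed before AssertNoAllocation so that any
heap preparation during its construction still happens while allocation is
permitted (a hedged reading of the reorder). The required ordering:

    HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);  // make iterable
    HeapIterator iterator;        // constructed while allocation is allowed
    AssertNoAllocation no_alloc;  // from here on, allocation is forbidden
    for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
      // ... inspect obj without allocating ...
    }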
diff --git a/src/log.h b/src/log.h
index 50358ce..677dada 100644
--- a/src/log.h
+++ b/src/log.h
@@ -29,6 +29,7 @@
 #define V8_LOG_H_
 
 #include "allocation.h"
+#include "objects.h"
 #include "platform.h"
 #include "log-utils.h"
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 30838bd..364fdb6 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,6 +93,63 @@
 namespace v8 {
 namespace internal {
 
+class FrameScope {
+ public:
+  explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
+      : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
+    masm->set_has_frame(true);
+    if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+      masm->EnterFrame(type);
+    }
+  }
+
+  ~FrameScope() {
+    if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+      masm_->LeaveFrame(type_);
+    }
+    masm_->set_has_frame(old_has_frame_);
+  }
+
+  // Normally we generate the leave-frame code when this object goes
+  // out of scope.  Sometimes we may need to generate the code somewhere else
+  // in addition.  Calling this will achieve that, but the object stays in
+  // scope, the MacroAssembler is still marked as being in a frame scope, and
+  // the code will be generated again when it goes out of scope.
+  void GenerateLeaveFrame() {
+    masm_->LeaveFrame(type_);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  StackFrame::Type type_;
+  bool old_has_frame_;
+};
+
+
+class AllowExternalCallThatCantCauseGC: public FrameScope {
+ public:
+  explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
+      : FrameScope(masm, StackFrame::NONE) { }
+};
+
+
+class NoCurrentFrameScope {
+ public:
+  explicit NoCurrentFrameScope(MacroAssembler* masm)
+      : masm_(masm), saved_(masm->has_frame()) {
+    masm->set_has_frame(false);
+  }
+
+  ~NoCurrentFrameScope() {
+    masm_->set_has_frame(saved_);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  bool saved_;
+};
+
+
 // Support for "structured" code comments.
 #ifdef DEBUG
 
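Note: a hedged usage sketch for the scopes defined above (the generator
function is hypothetical). FrameScope brackets the emitted code with
EnterFrame/LeaveFrame and keeps masm->has_frame() accurate so code that
requires a frame can assert for one:

    void GenerateStubBody(MacroAssembler* masm) {  // hypothetical generator
      {
        FrameScope scope(masm, StackFrame::INTERNAL);  // emits EnterFrame
        // ... emit code that may call the runtime; has_frame() is true here.
      }                                                // emits LeaveFrame
      {
        // Documents (and lets asserts verify) that the external call emitted
        // here cannot cause a GC; no frame code is emitted for NONE.
        AllowExternalCallThatCantCauseGC scope(masm);
        // ... emit a direct call to a GC-free C function ...
      }
    }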
diff --git a/src/macros.py b/src/macros.py
index 7a493ca..bf7119f 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -82,8 +82,6 @@
 const kMaxYear  = 1000000;
 const kMinMonth = -10000000;
 const kMaxMonth = 10000000;
-const kMinDate  = -100000000;
-const kMaxDate  = 100000000;
 
 # Native cache ids.
 const STRING_TO_REGEXP_CACHE_ID = 0;
@@ -128,6 +126,11 @@
 # we cannot handle those anyway.
 macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
 
+# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
+const kBoundFunctionIndex = 0;
+const kBoundThisIndex = 1;
+const kBoundArgumentsStartIndex = 2;
+
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
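Note: the three kBound* constants added above name positions in the array
returned by %BoundFunctionGetBindings. A hedged illustration for a
hypothetical bound function f = g.bind(t, a, b):

    // bindings = %BoundFunctionGetBindings(f);   // hypothetical example
    // bindings[kBoundFunctionIndex]              == g  (target function)
    // bindings[kBoundThisIndex]                  == t  (bound receiver)
    // bindings[kBoundArgumentsStartIndex + 0]    == a  (first bound argument)
    // bindings[kBoundArgumentsStartIndex + 1]    == b  (second bound argument)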
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
new file mode 100644
index 0000000..573715e
--- /dev/null
+++ b/src/mark-compact-inl.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARK_COMPACT_INL_H_
+#define V8_MARK_COMPACT_INL_H_
+
+#include "isolate.h"
+#include "memory.h"
+#include "mark-compact.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MarkBit Marking::MarkBitFrom(Address addr) {
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
+  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
+                                         p->ContainsOnlyData());
+}
+
+
+void MarkCompactCollector::SetFlags(int flags) {
+  sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
+}
+
+
+void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  if (!mark_bit.Get()) {
+    mark_bit.Set();
+    MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+    ProcessNewlyMarkedObject(obj);
+  }
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(!mark_bit.Get());
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+}
+
+
+bool MarkCompactCollector::IsMarked(Object* obj) {
+  ASSERT(obj->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(obj);
+  return Marking::MarkBitFrom(heap_object).Get();
+}
+
+
+void MarkCompactCollector::RecordSlot(Object** anchor_slot,
+                                      Object** slot,
+                                      Object* object) {
+  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  if (object_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            object_page->slots_buffer_address(),
+                            slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(object_page);
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MARK_COMPACT_INL_H_
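Note: the helpers in this new header implement tri-color marking over
per-page bitmaps. Every object owns a pair of adjacent mark bits, with the
encodings defined in mark-compact.cc below (white 00, black 10, grey 11, 01
impossible). A hedged sketch of the basic transition MarkObject/SetMark
perform:

    // Sketch: white -> black transition plus live-byte accounting.
    void MarkIfWhite(HeapObject* obj) {  // hypothetical helper
      MarkBit mark_bit = Marking::MarkBitFrom(obj->address());
      if (!mark_bit.Get()) {             // first bit clear: object is white
        mark_bit.Set();                  // 00 -> 10, i.e. white -> black
        MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
      }
    }

RecordSlot() complements this for compaction: pointers into evacuation
candidates are appended to the candidate page's slots buffer so they can be
rewritten after evacuation, and a page whose buffer overflows is simply
evicted from the candidate set rather than tracked further.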
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9b0d5fc..cc5fda7 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -27,20 +27,31 @@
 
 #include "v8.h"
 
+#include "code-stubs.h"
 #include "compilation-cache.h"
+#include "deoptimizer.h"
 #include "execution.h"
-#include "heap-profiler.h"
 #include "gdb-jit.h"
 #include "global-handles.h"
+#include "heap-profiler.h"
 #include "ic-inl.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
+
+const char* Marking::kWhiteBitPattern = "00";
+const char* Marking::kBlackBitPattern = "10";
+const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kImpossibleBitPattern = "01";
+
+
 // -------------------------------------------------------------------------
 // MarkCompactCollector
 
@@ -48,70 +59,462 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      force_compaction_(false),
-      compacting_collection_(false),
-      compact_on_next_gc_(false),
-      previous_marked_count_(0),
+      sweep_precisely_(false),
+      compacting_(false),
+      was_marked_incrementally_(false),
+      collect_maps_(FLAG_collect_maps),
       tracer_(NULL),
-#ifdef DEBUG
-      live_young_objects_size_(0),
-      live_old_pointer_objects_size_(0),
-      live_old_data_objects_size_(0),
-      live_code_objects_size_(0),
-      live_map_objects_size_(0),
-      live_cell_objects_size_(0),
-      live_lo_objects_size_(0),
-      live_bytes_(0),
-#endif
+      migration_slots_buffer_(NULL),
       heap_(NULL),
       code_flusher_(NULL),
       encountered_weak_maps_(NULL) { }
 
 
+#ifdef DEBUG
+class VerifyMarkingVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyMarking(Address bottom, Address top) {
+  VerifyMarkingVisitor visitor;
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom;
+       current < top;
+       current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      ASSERT(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyMarking(NewSpace* space) {
+  Address end = space->top();
+  NewSpacePageIterator it(space->bottom(), end);
+  // The bottom position is at the start of its page, which allows us to
+  // use page->area_start() as the start of the range on all pages.
+  ASSERT_EQ(space->bottom(),
+            NewSpacePage::FromAddress(space->bottom())->area_start());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address limit = it.has_next() ? page->area_end() : end;
+    ASSERT(limit == end || !page->Contains(end));
+    VerifyMarking(page->area_start(), limit);
+  }
+}
+
+
+static void VerifyMarking(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    VerifyMarking(p->area_start(), p->area_end());
+  }
+}
+
+
+static void VerifyMarking(Heap* heap) {
+  VerifyMarking(heap->old_pointer_space());
+  VerifyMarking(heap->old_data_space());
+  VerifyMarking(heap->code_space());
+  VerifyMarking(heap->cell_space());
+  VerifyMarking(heap->map_space());
+  VerifyMarking(heap->new_space());
+
+  VerifyMarkingVisitor visitor;
+
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (MarkCompactCollector::IsMarked(obj)) {
+      obj->Iterate(&visitor);
+    }
+  }
+
+  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+class VerifyEvacuationVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyEvacuation(Address bottom, Address top) {
+  VerifyEvacuationVisitor visitor;
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom;
+       current < top;
+       current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      ASSERT(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+  VerifyEvacuationVisitor visitor;
+
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address current = page->area_start();
+    Address limit = it.has_next() ? page->area_end() : space->top();
+    ASSERT(limit == space->top() || !page->Contains(space->top()));
+    while (current < limit) {
+      HeapObject* object = HeapObject::FromAddress(current);
+      object->Iterate(&visitor);
+      current += object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->IsEvacuationCandidate()) continue;
+    VerifyEvacuation(p->area_start(), p->area_end());
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap) {
+  VerifyEvacuation(heap->old_pointer_space());
+  VerifyEvacuation(heap->old_data_space());
+  VerifyEvacuation(heap->code_space());
+  VerifyEvacuation(heap->cell_space());
+  VerifyEvacuation(heap->map_space());
+  VerifyEvacuation(heap->new_space());
+
+  VerifyEvacuationVisitor visitor;
+  heap->IterateStrongRoots(&visitor, VISIT_ALL);
+}
+#endif
+
+
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+  p->MarkEvacuationCandidate();
+  evacuation_candidates_.Add(p);
+}
+
+
+bool MarkCompactCollector::StartCompaction() {
+  if (!compacting_) {
+    ASSERT(evacuation_candidates_.length() == 0);
+
+    CollectEvacuationCandidates(heap()->old_pointer_space());
+    CollectEvacuationCandidates(heap()->old_data_space());
+
+    if (FLAG_compact_code_space) {
+      CollectEvacuationCandidates(heap()->code_space());
+    }
+
+    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+
+    compacting_ = evacuation_candidates_.length() > 0;
+  }
+
+  return compacting_;
+}
+
+
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   ASSERT(state_ == PREPARE_GC);
   ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
 
-  // Prepare has selected whether to compact the old generation or not.
-  // Tell the tracer.
-  if (IsCompacting()) tracer_->set_is_compacting();
-
   MarkLiveObjects();
+  ASSERT(heap_->incremental_marking()->IsStopped());
 
-  if (FLAG_collect_maps) ClearNonLiveTransitions();
+  if (collect_maps_) ClearNonLiveTransitions();
 
   ClearWeakMaps();
 
-  SweepLargeObjectSpace();
-
-  if (IsCompacting()) {
-    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
-    EncodeForwardingAddresses();
-
-    heap()->MarkMapPointersAsEncoded(true);
-    UpdatePointers();
-    heap()->MarkMapPointersAsEncoded(false);
-    heap()->isolate()->pc_to_code_cache()->Flush();
-
-    RelocateObjects();
-  } else {
-    SweepSpaces();
-    heap()->isolate()->pc_to_code_cache()->Flush();
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    VerifyMarking(heap_);
   }
+#endif
+
+  SweepSpaces();
+
+  if (!collect_maps_) ReattachInitialMaps();
+
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
 
   Finish();
 
-  // Save the count of marked objects remaining after the collection and
-  // null out the GC tracer.
-  previous_marked_count_ = tracer_->marked_count();
-  ASSERT(previous_marked_count_ == 0);
   tracer_ = NULL;
 }
 
 
+#ifdef DEBUG
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean() {
+  VerifyMarkbitsAreClean(heap_->old_pointer_space());
+  VerifyMarkbitsAreClean(heap_->old_data_space());
+  VerifyMarkbitsAreClean(heap_->code_space());
+  VerifyMarkbitsAreClean(heap_->cell_space());
+  VerifyMarkbitsAreClean(heap_->map_space());
+  VerifyMarkbitsAreClean(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    ASSERT(Marking::IsWhite(mark_bit));
+  }
+}
+#endif
+
+
+static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+static void ClearMarkbitsInNewSpace(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void MarkCompactCollector::ClearMarkbits() {
+  ClearMarkbitsInPagedSpace(heap_->code_space());
+  ClearMarkbitsInPagedSpace(heap_->map_space());
+  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
+  ClearMarkbitsInPagedSpace(heap_->old_data_space());
+  ClearMarkbitsInPagedSpace(heap_->cell_space());
+  ClearMarkbitsInNewSpace(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    mark_bit.Clear();
+    mark_bit.Next().Clear();
+  }
+}
+
+
+bool Marking::TransferMark(Address old_start, Address new_start) {
+  // This is only used when resizing an object.
+  ASSERT(MemoryChunk::FromAddress(old_start) ==
+         MemoryChunk::FromAddress(new_start));
+
+  // If the mark doesn't move, we don't check the color of the object.
+  // It doesn't matter whether the object is black, since it hasn't changed
+  // size, so the adjustment to the live data count will be zero anyway.
+  if (old_start == new_start) return false;
+
+  MarkBit new_mark_bit = MarkBitFrom(new_start);
+  MarkBit old_mark_bit = MarkBitFrom(old_start);
+
+#ifdef DEBUG
+  ObjectColor old_color = Color(old_mark_bit);
+#endif
+
+  if (Marking::IsBlack(old_mark_bit)) {
+    old_mark_bit.Clear();
+    ASSERT(IsWhite(old_mark_bit));
+    Marking::MarkBlack(new_mark_bit);
+    return true;
+  } else if (Marking::IsGrey(old_mark_bit)) {
+    ASSERT(heap_->incremental_marking()->IsMarking());
+    old_mark_bit.Clear();
+    old_mark_bit.Next().Clear();
+    ASSERT(IsWhite(old_mark_bit));
+    heap_->incremental_marking()->WhiteToGreyAndPush(
+        HeapObject::FromAddress(new_start), new_mark_bit);
+    heap_->incremental_marking()->RestartIfNotMarking();
+  }
+
+#ifdef DEBUG
+  ObjectColor new_color = Color(new_mark_bit);
+  ASSERT(new_color == old_color);
+#endif
+
+  return false;
+}
+
+
+const char* AllocationSpaceName(AllocationSpace space) {
+  switch (space) {
+    case NEW_SPACE: return "NEW_SPACE";
+    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
+    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
+    case CODE_SPACE: return "CODE_SPACE";
+    case MAP_SPACE: return "MAP_SPACE";
+    case CELL_SPACE: return "CELL_SPACE";
+    case LO_SPACE: return "LO_SPACE";
+    default:
+      UNREACHABLE();
+  }
+
+  return NULL;
+}
+
+
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
+  ASSERT(space->identity() == OLD_POINTER_SPACE ||
+         space->identity() == OLD_DATA_SPACE ||
+         space->identity() == CODE_SPACE);
+
+  int number_of_pages = space->CountTotalPages();
+
+  PageIterator it(space);
+  const int kMaxMaxEvacuationCandidates = 1000;
+  int max_evacuation_candidates = Min(
+    kMaxMaxEvacuationCandidates,
+    static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+
+  if (FLAG_stress_compaction || FLAG_always_compact) {
+    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+  }
+
+  class Candidate {
+   public:
+    Candidate() : fragmentation_(0), page_(NULL) { }
+    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
+
+    int fragmentation() { return fragmentation_; }
+    Page* page() { return page_; }
+
+   private:
+    int fragmentation_;
+    Page* page_;
+  };
+
+  Candidate candidates[kMaxMaxEvacuationCandidates];
+
+  int count = 0;
+  if (it.has_next()) it.next();  // Never compact the first page.
+  int fragmentation = 0;
+  Candidate* least = NULL;
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->ClearEvacuationCandidate();
+    if (FLAG_stress_compaction) {
+      int counter = space->heap()->ms_count();
+      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
+      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+    } else {
+      fragmentation = space->Fragmentation(p);
+    }
+    if (fragmentation != 0) {
+      if (count < max_evacuation_candidates) {
+        candidates[count++] = Candidate(fragmentation, p);
+      } else {
+        if (least == NULL) {
+          for (int i = 0; i < max_evacuation_candidates; i++) {
+            if (least == NULL ||
+                candidates[i].fragmentation() < least->fragmentation()) {
+              least = candidates + i;
+            }
+          }
+        }
+        if (least->fragmentation() < fragmentation) {
+          *least = Candidate(fragmentation, p);
+          least = NULL;
+        }
+      }
+    }
+  }
+  for (int i = 0; i < count; i++) {
+    AddEvacuationCandidate(candidates[i].page());
+  }
+
+  if (count > 0 && FLAG_trace_fragmentation) {
+    PrintF("Collected %d evacuation candidates for space %s\n",
+           count,
+           AllocationSpaceName(space->identity()));
+  }
+}
+
+
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  ASSERT_EQ(0, evacuation_candidates_.length());
+}
+
+
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
+  // Disable collection of maps if incremental marking is enabled.
+  // Map collection algorithm relies on a special map transition tree traversal
+  // order which is not implemented for incremental marking.
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
   // Rather than passing the tracer around we stash it in a static member
   // variable.
   tracer_ = tracer;
@@ -120,16 +523,10 @@
   ASSERT(state_ == IDLE);
   state_ = PREPARE_GC;
 #endif
-  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
 
-  compacting_collection_ =
-      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
-  compact_on_next_gc_ = false;
+  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
-  if (FLAG_never_compact) compacting_collection_ = false;
-  if (!heap()->map_space()->MapPointersEncodable())
-      compacting_collection_ = false;
-  if (FLAG_collect_maps) CreateBackPointers();
+  if (collect_maps_) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -137,21 +534,31 @@
   }
 #endif
 
+  // Clear marking bits for precise sweeping to collect all garbage.
+  if (was_marked_incrementally_ && PreciseSweepingRequired()) {
+    heap()->incremental_marking()->Abort();
+    ClearMarkbits();
+    AbortCompaction();
+    was_marked_incrementally_ = false;
+  }
+
+  // Don't start compaction in the middle of an incremental marking cycle:
+  // no slots were recorded while marking, so pointers could not be updated.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction();
+  }
+
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
-       space != NULL; space = spaces.next()) {
-    space->PrepareForMarkCompact(compacting_collection_);
+       space != NULL;
+       space = spaces.next()) {
+    space->PrepareForMarkCompact();
   }
 
 #ifdef DEBUG
-  live_bytes_ = 0;
-  live_young_objects_size_ = 0;
-  live_old_pointer_objects_size_ = 0;
-  live_old_data_objects_size_ = 0;
-  live_code_objects_size_ = 0;
-  live_map_objects_size_ = 0;
-  live_cell_objects_size_ = 0;
-  live_lo_objects_size_ = 0;
+  if (!was_marked_incrementally_ && FLAG_verify_heap) {
+    VerifyMarkbitsAreClean();
+  }
 #endif
 }
 
@@ -168,31 +575,6 @@
   heap()->isolate()->stub_cache()->Clear();
 
   heap()->external_string_table_.CleanUp();
-
-  // If we've just compacted old space there's no reason to check the
-  // fragmentation limit. Just return.
-  if (HasCompacted()) return;
-
-  // We compact the old generation on the next GC if it has gotten too
-  // fragmented (ie, we could recover an expected amount of space by
-  // reclaiming the waste and free list blocks).
-  static const int kFragmentationLimit = 15;        // Percent.
-  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
-  intptr_t old_gen_recoverable = 0;
-  intptr_t old_gen_used = 0;
-
-  OldSpaces spaces;
-  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
-    old_gen_recoverable += space->Waste() + space->AvailableFree();
-    old_gen_used += space->Size();
-  }
-
-  int old_gen_fragmentation =
-      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
-  if (old_gen_fragmentation > kFragmentationLimit &&
-      old_gen_recoverable > kFragmentationAllowed) {
-    compact_on_next_gc_ = true;
-  }
 }
 
 
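Note: the block removed from Finish() above was the old whole-heap trigger,
which scheduled a compacting GC once recoverable space exceeded 15% of
old-generation use and at least 1 MB. Selection is now per page in
CollectEvacuationCandidates(); a hedged, simplified sketch of its shape:

    // Simplified from CollectEvacuationCandidates() above (sketch only).
    int count = 0;
    while (it.has_next()) {
      Page* p = it.next();
      int fragmentation = space->Fragmentation(p);  // 0 means keep in place
      if (fragmentation != 0 && count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      }  // once full, the least fragmented candidate is displaced instead
    }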
@@ -237,8 +619,7 @@
   }
 
   void AddCandidate(JSFunction* function) {
-    ASSERT(function->unchecked_code() ==
-           function->unchecked_shared()->unchecked_code());
+    ASSERT(function->code() == function->shared()->code());
 
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
@@ -258,16 +639,24 @@
     while (candidate != NULL) {
       next_candidate = GetNextCandidate(candidate);
 
-      SharedFunctionInfo* shared = candidate->unchecked_shared();
+      SharedFunctionInfo* shared = candidate->shared();
 
-      Code* code = shared->unchecked_code();
-      if (!code->IsMarked()) {
+      Code* code = shared->code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
         shared->set_code(lazy_compile);
         candidate->set_code(lazy_compile);
       } else {
-        candidate->set_code(shared->unchecked_code());
+        candidate->set_code(shared->code());
       }
 
+      // We are in the middle of a GC cycle so the write barrier in the code
+      // setter did not record the slot update and we have to do that manually.
+      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+      isolate_->heap()->mark_compact_collector()->
+          RecordCodeEntrySlot(slot, target);
+
       candidate = next_candidate;
     }
 
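Note: the manual RecordCodeEntrySlot() call above is needed because the
collector's own pointer updates run in the middle of a GC cycle, where the
code setter's write barrier did not fire; without recording the slot, a
later evacuation could move the code object and leave the function's code
entry stale. The pattern, restated as a hedged sketch:

    // Sketch of recording a code-entry slot by hand during GC.
    Address slot = function->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    heap->mark_compact_collector()->RecordCodeEntrySlot(slot, target);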
@@ -284,8 +673,9 @@
       next_candidate = GetNextCandidate(candidate);
       SetNextCandidate(candidate, NULL);
 
-      Code* code = candidate->unchecked_code();
-      if (!code->IsMarked()) {
+      Code* code = candidate->code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
         candidate->set_code(lazy_compile);
       }
 
@@ -311,7 +701,7 @@
 
   static SharedFunctionInfo** GetNextCandidateField(
       SharedFunctionInfo* candidate) {
-    Code* code = candidate->unchecked_code();
+    Code* code = candidate->code();
     return reinterpret_cast<SharedFunctionInfo**>(
         code->address() + Code::kNextCodeFlushingCandidateOffset);
   }
@@ -355,14 +745,14 @@
   // except the maps for the object and its possible substrings might be
   // marked.
   HeapObject* object = HeapObject::cast(*p);
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  InstanceType type = map_word.ToMap()->instance_type();
+  if (!FLAG_clever_optimizations) return object;
+  Map* map = object->map();
+  InstanceType type = map->instance_type();
   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
 
   Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
-  Heap* heap = map_word.ToMap()->heap();
-  if (second != heap->raw_unchecked_empty_string()) {
+  Heap* heap = map->GetHeap();
+  if (second != heap->empty_string()) {
     return object;
   }
 
@@ -404,14 +794,12 @@
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
+    table_.Register(kVisitGlobalContext, &VisitGlobalContext);
+
     table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
 
-    table_.Register(kVisitGlobalContext,
-                    &FixedBodyVisitor<StaticMarkingVisitor,
-                                      Context::MarkCompactBodyDescriptor,
-                                      void>::Visit);
-
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
 
@@ -456,7 +844,7 @@
   }
 
   INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    MarkObjectByPointer(heap, p);
+    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }
 
   INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
@@ -466,29 +854,49 @@
       if (VisitUnmarkedObjects(heap, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
-  }
-
-  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
-      IC::Clear(rinfo->pc());
-      // Please note targets for cleared inline cached do not have to be
-      // marked since they are contained in HEAP->non_monomorphic_cache().
-    } else {
-      heap->mark_compact_collector()->MarkObject(code);
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    for (Object** p = start; p < end; p++) {
+      MarkObjectByPointer(collector, start, p);
     }
   }
 
   static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
-    Object* cell = rinfo->target_cell();
-    Object* old_cell = cell;
-    VisitPointer(heap, &cell);
-    if (cell != old_cell) {
-      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+    JSGlobalPropertyCell* cell =
+        JSGlobalPropertyCell::cast(rinfo->target_cell());
+    MarkBit mark = Marking::MarkBitFrom(cell);
+    heap->mark_compact_collector()->MarkObject(cell, mark);
+  }
+
+  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    // TODO(mstarzinger): We do not short-circuit cons strings here, verify
+    // that there can be no such embedded pointers and add assertion here.
+    HeapObject* object = HeapObject::cast(rinfo->target_object());
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    heap->mark_compact_collector()->MarkObject(object, mark);
+  }
+
+  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
+      IC::Clear(rinfo->pc());
+      // Note that targets for cleared inline caches do not have to be
+      // marked since they are contained in HEAP->non_monomorphic_cache().
+      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    } else {
+      if (FLAG_cleanup_code_caches_at_gc &&
+          target->kind() == Code::STUB &&
+          target->major_key() == CodeStub::CallFunction &&
+          target->has_function_cache()) {
+        CallFunctionStub::Clear(heap, rinfo->pc());
+      }
+      MarkBit code_mark = Marking::MarkBitFrom(target);
+      heap->mark_compact_collector()->MarkObject(target, code_mark);
     }
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
 
   static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
@@ -496,17 +904,21 @@
             rinfo->IsPatchedReturnSequence()) ||
            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
             rinfo->IsPatchedDebugBreakSlotSequence()));
-    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    heap->mark_compact_collector()->MarkObject(code);
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    MarkBit code_mark = Marking::MarkBitFrom(target);
+    heap->mark_compact_collector()->MarkObject(target, code_mark);
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
 
   // Mark object pointed to by p.
-  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
+  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
+                                         Object** anchor_slot,
+                                         Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
-    if (!object->IsMarked()) {
-      heap->mark_compact_collector()->MarkUnmarkedObject(object);
-    }
+    collector->RecordSlot(anchor_slot, p, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    collector->MarkObject(object, mark);
   }
 
 
@@ -515,12 +927,15 @@
                                          HeapObject* obj)) {
 #ifdef DEBUG
     ASSERT(Isolate::Current()->heap()->Contains(obj));
-    ASSERT(!obj->IsMarked());
+    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
 #endif
     Map* map = obj->map();
-    collector->SetMark(obj);
+    Heap* heap = obj->GetHeap();
+    MarkBit mark = Marking::MarkBitFrom(obj);
+    heap->mark_compact_collector()->SetMark(obj, mark);
     // Mark the map pointer and the body.
-    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    heap->mark_compact_collector()->MarkObject(map, map_mark);
     IterateBody(map, obj);
   }
 
@@ -536,15 +951,19 @@
     MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
     for (Object** p = start; p < end; p++) {
-      if (!(*p)->IsHeapObject()) continue;
-      HeapObject* obj = HeapObject::cast(*p);
-      if (obj->IsMarked()) continue;
+      Object* o = *p;
+      if (!o->IsHeapObject()) continue;
+      collector->RecordSlot(start, p, o);
+      HeapObject* obj = HeapObject::cast(o);
+      MarkBit mark = Marking::MarkBitFrom(obj);
+      if (mark.Get()) continue;
       VisitUnmarkedObject(collector, obj);
     }
     return true;
   }
 
   static inline void VisitExternalReference(Address* p) { }
+  static inline void VisitExternalReference(RelocInfo* rinfo) { }
   static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
 
  private:
@@ -567,7 +986,7 @@
                               void> StructObjectVisitor;
 
   static void VisitJSWeakMap(Map* map, HeapObject* object) {
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
 
     // Enqueue weak map in linked list of encountered weak maps.
@@ -578,25 +997,27 @@
     // Skip visiting the backing hash table containing the mappings.
     int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         JSWeakMap::BodyDescriptor::kStartOffset,
         JSWeakMap::kTableOffset);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         JSWeakMap::kTableOffset + kPointerSize,
         object_size);
 
     // Mark the backing hash table without pushing it on the marking stack.
-    ASSERT(!weak_map->unchecked_table()->IsMarked());
-    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
-    collector->SetMark(weak_map->unchecked_table());
+    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+    ASSERT(!MarkCompactCollector::IsMarked(table));
+    collector->SetMark(table, Marking::MarkBitFrom(table));
+    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
+    ASSERT(MarkCompactCollector::IsMarked(table->map()));
   }
 
   static void VisitCode(Map* map, HeapObject* object) {
     reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
-        map->heap());
+        map->GetHeap());
   }
 
   // Code flushing support.
@@ -608,19 +1029,19 @@
   static const int kRegExpCodeThreshold = 5;
 
   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
-    Object* undefined = heap->raw_unchecked_undefined_value();
+    Object* undefined = heap->undefined_value();
     return (info->script() != undefined) &&
         (reinterpret_cast<Script*>(info->script())->source() != undefined);
   }
 
 
   inline static bool IsCompiled(JSFunction* function) {
-    return function->unchecked_code() !=
+    return function->code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
   inline static bool IsCompiled(SharedFunctionInfo* function) {
-    return function->unchecked_code() !=
+    return function->code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
@@ -629,13 +1050,16 @@
 
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    if (function->unchecked_code()->IsMarked()) {
-      shared_info->set_code_age(0);
+    MarkBit code_mark = Marking::MarkBitFrom(function->code());
+    if (code_mark.Get()) {
+      if (!Marking::MarkBitFrom(shared_info).Get()) {
+        shared_info->set_code_age(0);
+      }
       return false;
     }
 
     // We do not flush code for optimized functions.
-    if (function->code() != shared_info->unchecked_code()) {
+    if (function->code() != shared_info->code()) {
       return false;
     }
 
@@ -645,8 +1069,9 @@
   inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    if (shared_info->unchecked_code()->IsMarked()) {
-      shared_info->set_code_age(0);
+    MarkBit code_mark =
+        Marking::MarkBitFrom(shared_info->code());
+    if (code_mark.Get()) {
       return false;
     }
 
@@ -658,9 +1083,7 @@
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
-    if (function_data->IsHeapObject() &&
-        (SafeMap(function_data)->instance_type() ==
-         FUNCTION_TEMPLATE_INFO_TYPE)) {
+    if (function_data->IsFunctionTemplateInfo()) {
       return false;
     }
 
@@ -701,40 +1124,9 @@
     return true;
   }
 
-
-  static inline Map* SafeMap(Object* obj) {
-    MapWord map_word = HeapObject::cast(obj)->map_word();
-    map_word.ClearMark();
-    map_word.ClearOverflow();
-    return map_word.ToMap();
-  }
-
-
-  static inline bool IsJSBuiltinsObject(Object* obj) {
-    return obj->IsHeapObject() &&
-        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
-  }
-
-
   static inline bool IsValidNotBuiltinContext(Object* ctx) {
-    if (!ctx->IsHeapObject()) return false;
-
-    Map* map = SafeMap(ctx);
-    Heap* heap = map->heap();
-    if (!(map == heap->raw_unchecked_function_context_map() ||
-          map == heap->raw_unchecked_catch_context_map() ||
-          map == heap->raw_unchecked_with_context_map() ||
-          map == heap->raw_unchecked_global_context_map())) {
-      return false;
-    }
-
-    Context* context = reinterpret_cast<Context*>(ctx);
-
-    if (IsJSBuiltinsObject(context->global())) {
-      return false;
-    }
-
-    return true;
+    return ctx->IsContext() &&
+        !Context::cast(ctx)->global()->IsJSBuiltinsObject();
   }
 
 
@@ -754,13 +1146,15 @@
                                           bool is_ascii) {
     // Make sure that the fixed array is in fact initialized on the RegExp.
     // We could potentially trigger a GC when initializing the RegExp.
-    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
+    if (HeapObject::cast(re->data())->map()->instance_type() !=
+            FIXED_ARRAY_TYPE) return;
 
     // Make sure this is a RegExp that actually contains code.
     if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
 
     Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
-    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
+    if (!code->IsSmi() &&
+        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
       // Save a copy that can be reinstated if we need the code again.
       re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                              code,
@@ -796,7 +1190,7 @@
   // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
   // we flush the code.
   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSRegExpFields(map, object);
@@ -813,7 +1207,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitSharedFunctionInfoGeneric(map, object);
       return;
@@ -824,7 +1218,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCodeGeneric(
       Map* map, HeapObject* object, bool known_flush_code_candidate) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
 
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
@@ -841,18 +1235,30 @@
 
 
   static void VisitCodeEntry(Heap* heap, Address entry_address) {
-    Object* code = Code::GetObjectFromEntryAddress(entry_address);
-    Object* old_code = code;
-    VisitPointer(heap, &code);
-    if (code != old_code) {
-      Memory::Address_at(entry_address) =
-          reinterpret_cast<Code*>(code)->entry();
+    Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+    MarkBit mark = Marking::MarkBitFrom(code);
+    heap->mark_compact_collector()->MarkObject(code, mark);
+    heap->mark_compact_collector()->
+        RecordCodeEntrySlot(entry_address, code);
+  }
+
+  static void VisitGlobalContext(Map* map, HeapObject* object) {
+    FixedBodyVisitor<StaticMarkingVisitor,
+                     Context::MarkCompactBodyDescriptor,
+                     void>::Visit(map, object);
+
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+    for (int idx = Context::FIRST_WEAK_SLOT;
+         idx < Context::GLOBAL_CONTEXT_SLOTS;
+         ++idx) {
+      Object** slot =
+          HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+      collector->RecordSlot(slot, slot, *slot);
     }
   }
 
-
   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSFunction(map, object);
@@ -867,10 +1273,12 @@
     }
 
     if (!flush_code_candidate) {
-      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
+      Code* code = jsfunction->shared()->code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      collector->MarkObject(code, code_mark);
 
-      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        collector->MarkInlinedFunctionsCode(jsfunction->unchecked_code());
+      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        collector->MarkInlinedFunctionsCode(jsfunction->code());
       }
     }
 
@@ -894,12 +1302,11 @@
   static inline void VisitJSFunctionFields(Map* map,
                                            JSFunction* object,
                                            bool flush_code_candidate) {
-    Heap* heap = map->heap();
-    MarkCompactCollector* collector = heap->mark_compact_collector();
+    Heap* heap = map->GetHeap();
 
     VisitPointers(heap,
-                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
-                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
 
     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -909,29 +1316,39 @@
       // Visit shared function info to avoid double checking of its
       // flushability.
       SharedFunctionInfo* shared_info = object->unchecked_shared();
-      if (!shared_info->IsMarked()) {
+      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
+      if (!shared_info_mark.Get()) {
         Map* shared_info_map = shared_info->map();
-        collector->SetMark(shared_info);
-        collector->MarkObject(shared_info_map);
+        MarkBit shared_info_map_mark =
+            Marking::MarkBitFrom(shared_info_map);
+        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
+        heap->mark_compact_collector()->MarkObject(shared_info_map,
+                                                   shared_info_map_mark);
         VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                    shared_info,
                                                    true);
       }
     }
 
-    VisitPointers(heap,
-                  SLOT_ADDR(object,
-                            JSFunction::kCodeEntryOffset + kPointerSize),
-                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
+    VisitPointers(
+        heap,
+        HeapObject::RawField(object,
+                             JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object,
+                             JSFunction::kNonWeakFieldsEndOffset));
 
     // Don't visit the next function list field as it is a weak reference.
+    Object** next_function =
+        HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
+    heap->mark_compact_collector()->RecordSlot(
+        next_function, next_function, *next_function);
   }
 
   static inline void VisitJSRegExpFields(Map* map,
                                          HeapObject* object) {
     int last_property_offset =
         JSRegExp::kSize + kPointerSize * map->inobject_properties();
-    VisitPointers(map->heap(),
+    VisitPointers(map->GetHeap(),
                   SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                   SLOT_ADDR(object, last_property_offset));
   }
@@ -1007,8 +1424,10 @@
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      collector_->MarkObject(shared->unchecked_code());
-      collector_->MarkObject(shared);
+      MarkBit shared_mark = Marking::MarkBitFrom(shared);
+      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+      collector_->MarkObject(shared->code(), code_mark);
+      collector_->MarkObject(shared, shared_mark);
     }
   }
 
@@ -1022,16 +1441,17 @@
   // of its code and the non-optimized versions of all inlined functions.
   // This is required to support bailing out from inlined code.
   DeoptimizationInputData* data =
-      reinterpret_cast<DeoptimizationInputData*>(
-          code->unchecked_deoptimization_data());
+      DeoptimizationInputData::cast(code->deoptimization_data());
 
-  FixedArray* literals = data->UncheckedLiteralArray();
+  FixedArray* literals = data->LiteralArray();
 
   for (int i = 0, count = data->InlinedFunctionCount()->value();
        i < count;
        i++) {
-    JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
-    MarkObject(inlined->unchecked_shared()->unchecked_code());
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    Code* inlined_code = inlined->shared()->code();
+    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
+    MarkObject(inlined_code, inlined_code_mark);
   }
 }
 
@@ -1045,7 +1465,8 @@
     // actual optimized code object.
     StackFrame* frame = it.frame();
     Code* code = frame->unchecked_code();
-    MarkObject(code);
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkObject(code, code_mark);
     if (frame->is_optimized()) {
       MarkInlinedFunctionsCode(frame->LookupCode());
     }
@@ -1056,7 +1477,8 @@
 void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());
 
-  if (!FLAG_flush_code) {
+  // TODO(1609) Currently incremental marker does not support code flushing.
+  if (!FLAG_flush_code || was_marked_incrementally_) {
     EnableCodeFlushing(false);
     return;
   }
@@ -1068,11 +1490,14 @@
     return;
   }
 #endif
+
   EnableCodeFlushing(true);
 
   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
-  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
+  HeapObject* descriptor_array = heap()->empty_descriptor_array();
+  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
+  MarkObject(descriptor_array, descriptor_array_mark);
 
   // Make sure we are not referencing the code from the stack.
   ASSERT(this == heap()->mark_compact_collector());
@@ -1089,7 +1514,7 @@
   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
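Note: the ProcessMarkingStack -> ProcessMarkingDeque rename above reflects
the structure now shared with incremental marking: grey objects are pushed
onto a deque and drained until empty, with an overflowed deque falling back
to rescanning the heap. A hedged sketch of the drain loop:

    // Sketch of the drain behind ProcessMarkingDeque (simplified).
    while (!marking_deque_.IsEmpty()) {
      HeapObject* object = marking_deque_.Pop();
      Map* map = object->map();
      // Visiting the body may mark and push further objects.
      StaticMarkingVisitor::IterateBody(map, object);
    }
    // On overflow, objects stay marked in the bitmap and are recovered by a
    // rescan before marking is allowed to finish.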
@@ -1113,19 +1538,21 @@
 
     // Replace flat cons strings in place.
     HeapObject* object = ShortCircuitConsString(p);
-    if (object->IsMarked()) return;
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) return;
 
     Map* map = object->map();
     // Mark the object.
-    collector_->SetMark(object);
+    collector_->SetMark(object, mark_bit);
 
     // Mark the map pointer and body, and push them on the marking stack.
-    collector_->MarkObject(map);
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    collector_->MarkObject(map, map_mark);
     StaticMarkingVisitor::IterateBody(map, object);
 
     // Mark all the objects reachable from the map and body.  May leave
     // overflowed objects in the heap.
-    collector_->EmptyMarkingStack();
+    collector_->EmptyMarkingDeque();
   }
 
   MarkCompactCollector* collector_;
@@ -1141,17 +1568,19 @@
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
+      Object* o = *p;
+      if (o->IsHeapObject() &&
+          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
         // Check if the symbol being pruned is an external symbol. We need to
         // delete the associated external data as this symbol is going away.
 
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        if ((*p)->IsExternalString()) {
+        if (o->IsExternalString()) {
           heap_->FinalizeExternalString(String::cast(*p));
         }
-        // Set the entry to null_value (as deleted).
-        *p = heap_->raw_unchecked_null_value();
+        // Set the entry to the_hole_value (as deleted).
+        *p = heap_->the_hole_value();
         pointers_removed_++;
       }
     }
@@ -1172,8 +1601,7 @@
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    MapWord first_word = HeapObject::cast(object)->map_word();
-    if (first_word.IsMarked()) {
+    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
       return object;
     } else {
       return NULL;
@@ -1182,28 +1610,26 @@
 };
 
 
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
-  ASSERT(!object->IsMarked());
+void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
+  ASSERT(IsMarked(object));
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
     if (FLAG_cleanup_code_caches_at_gc) {
       map->ClearCodeCache(heap());
     }
-    SetMark(map);
 
     // When map collection is enabled we have to mark through map's transitions
     // in a special way to make transition links weak.
     // Only maps for subclasses of JSReceiver can have transitions.
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
       MarkMapContents(map);
     } else {
-      marking_stack_.Push(map);
+      marking_deque_.PushBlack(map);
     }
   } else {
-    SetMark(object);
-    marking_stack_.Push(object);
+    marking_deque_.PushBlack(object);
   }
 }
 
@@ -1212,12 +1638,17 @@
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
   // transitions in ClearNonLiveTransitions.
-  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
-  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
+  FixedArray* prototype_transitions = map->prototype_transitions();
+  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+  if (!mark.Get()) {
+    mark.Set();
+    MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
+                                    prototype_transitions->Size());
+  }
 
-  Object* raw_descriptor_array =
-      *HeapObject::RawField(map,
-                            Map::kInstanceDescriptorsOrBitField3Offset);
+  Object** raw_descriptor_array_slot =
+      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
+  Object* raw_descriptor_array = *raw_descriptor_array_slot;
   if (!raw_descriptor_array->IsSmi()) {
     MarkDescriptorArray(
         reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
@@ -1231,24 +1662,26 @@
 
   Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
 
-  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
+  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
 }
 
 
 void MarkCompactCollector::MarkDescriptorArray(
     DescriptorArray* descriptors) {
-  if (descriptors->IsMarked()) return;
+  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+  if (descriptors_mark.Get()) return;
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
-  SetMark(descriptors);
+  ASSERT(descriptors != heap()->empty_descriptor_array());
+  SetMark(descriptors, descriptors_mark);
 
   FixedArray* contents = reinterpret_cast<FixedArray*>(
       descriptors->get(DescriptorArray::kContentArrayIndex));
   ASSERT(contents->IsHeapObject());
-  ASSERT(!contents->IsMarked());
+  ASSERT(!IsMarked(contents));
   ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  SetMark(contents);
+  MarkBit contents_mark = Marking::MarkBitFrom(contents);
+  SetMark(contents, contents_mark);
   // Contents contains (value, details) pairs.  If the details say that the type
   // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
   // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
@@ -1258,27 +1691,44 @@
     // If the pair (value, details) at index i, i+1 is not
     // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
-      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
-      if (object->IsHeapObject() && !object->IsMarked()) {
-        SetMark(object);
-        marking_stack_.Push(object);
+
+    Object** slot = contents->data_start() + i;
+    Object* value = *slot;
+    if (!value->IsHeapObject()) continue;
+
+    RecordSlot(slot, slot, *slot);
+
+    if (details.IsProperty()) {
+      HeapObject* object = HeapObject::cast(value);
+      MarkBit mark = Marking::MarkBitFrom(object);
+      if (!mark.Get()) {
+        SetMark(object, mark);
+        marking_deque_.PushBlack(object);
+      }
+    } else if (details.type() == ELEMENTS_TRANSITION && value->IsFixedArray()) {
+      // For maps with multiple elements transitions, the transition maps are
+      // stored in a FixedArray. Keep the fixed array alive but not the maps
+      // that it refers to.
+      HeapObject* object = HeapObject::cast(value);
+      MarkBit mark = Marking::MarkBitFrom(object);
+      if (!mark.Get()) {
+        SetMark(object, mark);
       }
     }
   }
   // The DescriptorArray descriptors contains a pointer to its contents array,
   // but the contents array is already marked.
-  marking_stack_.Push(descriptors);
+  marking_deque_.PushBlack(descriptors);
 }
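
The RecordSlot call above feeds the compaction phase: every location that points into a page chosen for evacuation must be remembered so it can be patched once the target moves. A hedged reconstruction from the call sites in this file; the real definition lives elsewhere in this patch and also filters on the anchor slot, so treat the details below as assumptions:

// Hedged sketch of the slot-recording idea, not the real definition.
void MarkCompactCollector::RecordSlot(Object** anchor_slot,
                                      Object** slot,
                                      Object* object) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(object));
  if (target_page->IsEvacuationCandidate()) {
    // Remember the slot on the target's page so the compactor can
    // rewrite it after evacuating the page.
    SlotsBuffer::AddTo(&slots_buffer_allocator_,
                       target_page->slots_buffer_address(),
                       slot,
                       SlotsBuffer::FAIL_ON_OVERFLOW);
  }
}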
 
 
 void MarkCompactCollector::CreateBackPointers() {
   HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* next_object = iterator.next();
-       next_object != NULL; next_object = iterator.next()) {
-    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
+  for (HeapObject* next_object = iterator.Next();
+       next_object != NULL; next_object = iterator.Next()) {
+    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
       Map* map = Map::cast(next_object);
-      STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
       if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
         map->CreateBackPointers();
       } else {
@@ -1289,54 +1739,126 @@
 }
 
 
-static int OverflowObjectSize(HeapObject* obj) {
-  // Recover the normal map pointer, it might be marked as live and
-  // overflowed.
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return obj->SizeFromMap(map_word.ToMap());
+// Fill the marking stack with overflowed objects returned by the given
+// iterator.  Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template<class T>
+static void DiscoverGreyObjectsWithIterator(Heap* heap,
+                                            MarkingDeque* marking_deque,
+                                            T* it) {
+  // The caller should ensure that the marking stack is initially not full,
+  // so that we don't waste effort pointlessly scanning for objects.
+  ASSERT(!marking_deque->IsFull());
+
+  Map* filler_map = heap->one_pointer_filler_map();
+  for (HeapObject* object = it->Next();
+       object != NULL;
+       object = it->Next()) {
+    MarkBit markbit = Marking::MarkBitFrom(object);
+    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
+      Marking::GreyToBlack(markbit);
+      MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+    }
+  }
 }
 
 
-class OverflowedObjectsScanner : public AllStatic {
- public:
-  // Fill the marking stack with overflowed objects returned by the given
-  // iterator.  Stop when the marking stack is filled or the end of the space
-  // is reached, whichever comes first.
-  template<class T>
-  static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
-                                           T* it) {
-    // The caller should ensure that the marking stack is initially not full,
-    // so that we don't waste effort pointlessly scanning for objects.
-    ASSERT(!collector->marking_stack_.is_full());
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
 
-    for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
-      if (object->IsOverflowed()) {
-        object->ClearOverflow();
-        ASSERT(object->IsMarked());
-        ASSERT(HEAP->Contains(object));
-        collector->marking_stack_.Push(object);
-        if (collector->marking_stack_.is_full()) return;
-      }
+
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
+
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(cell_base))));
+
+    const MarkBit::CellType current_cell = cells[cell_index];
+    if (current_cell == 0) continue;
+
+    const MarkBit::CellType next_cell = cells[cell_index + 1];
+    MarkBit::CellType grey_objects = current_cell &
+        ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+
+    int offset = 0;
+    while (grey_objects != 0) {
+      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
+      grey_objects >>= trailing_zeros;
+      offset += trailing_zeros;
+      MarkBit markbit(&cells[cell_index], 1 << offset, false);
+      ASSERT(Marking::IsGrey(markbit));
+      Marking::GreyToBlack(markbit);
+      Address addr = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(addr);
+      MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+      offset += 2;
+      grey_objects >>= 2;
+    }
+
+    grey_objects >>= (Bitmap::kBitsPerCell - 1);
+  }
+}
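
The grey-object extraction above deserves a worked example. Each object owns a pair of mark bits, and per the ASSERTs at the top of the function grey is the pattern 11, black is 10, white is 00. ANDing a cell with its own right-shift-by-one, with the low bit of the next cell shifted in via next_cell << (Bitmap::kBitsPerCell - 1) for the pair that straddles the cell boundary, leaves a 1 exactly at the first bit of every grey pair. (Note in passing that the trailing grey_objects >>= (Bitmap::kBitsPerCell - 1) after the while loop is a no-op: the loop only exits once grey_objects is zero.) A self-contained demonstration, assuming 32-bit cells:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Object at bit 4 is grey (bits 4,5 = 11); object at bit 8 is black
  // (bits 8,9 = 10); everything else is white (00).
  uint32_t current_cell = (3u << 4) | (1u << 8);
  uint32_t next_cell = 0;

  uint32_t grey_objects = current_cell &
      ((current_cell >> 1) | (next_cell << 31));

  // Prints 00000010: only the grey pair at bit 4 survives.
  printf("%08x\n", (unsigned)grey_objects);
  return 0;
}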
+
+
+static void DiscoverGreyObjectsInSpace(Heap* heap,
+                                       MarkingDeque* marking_deque,
+                                       PagedSpace* space) {
+  if (!space->was_swept_conservatively()) {
+    HeapObjectIterator it(space);
+    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
+  } else {
+    PageIterator it(space);
+    while (it.has_next()) {
+      Page* p = it.next();
+      DiscoverGreyObjectsOnPage(marking_deque, p);
+      if (marking_deque->IsFull()) return;
     }
   }
-};
+}
 
 
 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
-  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
+  Object* o = *p;
+  if (!o->IsHeapObject()) return false;
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
 }
 
 
 void MarkCompactCollector::MarkSymbolTable() {
-  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->symbol_table();
   // Mark the symbol table itself.
-  SetMark(symbol_table);
+  MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
+  SetMark(symbol_table, symbol_table_mark);
   // Explicitly mark the prefix.
   MarkingVisitor marker(heap());
   symbol_table->IteratePrefix(&marker);
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
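
MarkSymbolTable is what makes the symbol table weak: the table object and its prefix are marked strongly here, but its elements deliberately are not, so after marking the SymbolTableCleaner pass can drop any symbol nothing else kept alive. A minimal model of a container with weak elements (illustrative types only):

#include <cstddef>
#include <vector>

struct Sym { bool marked; };

// Model: the table is strongly reachable, its symbols are not.  After
// marking, sweep the table and clear entries whose symbol stayed white.
static int SweepWeakElements(std::vector<Sym*>* table) {
  int removed = 0;
  for (size_t i = 0; i < table->size(); i++) {
    if ((*table)[i] != NULL && !(*table)[i]->marked) {
      (*table)[i] = NULL;  // analogous to writing the_hole_value
      removed++;
    }
  }
  return removed;  // analogous to SymbolTableCleaner::PointersRemoved()
}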
 
 
@@ -1349,9 +1871,9 @@
   MarkSymbolTable();
 
   // There may be overflowed objects in the heap.  Visit them now.
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 }
 
@@ -1369,9 +1891,13 @@
     bool group_marked = false;
     for (size_t j = 0; j < entry->length_; j++) {
       Object* object = *objects[j];
-      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
-        group_marked = true;
-        break;
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        MarkBit mark = Marking::MarkBitFrom(heap_object);
+        if (mark.Get()) {
+          group_marked = true;
+          break;
+        }
       }
     }
 
@@ -1380,17 +1906,21 @@
       continue;
     }
 
-    // An object in the group is marked, so mark all heap objects in
-    // the group.
+    // An object in the group is marked, so mark all white heap objects
+    // in the group as grey.
     for (size_t j = 0; j < entry->length_; ++j) {
-      if ((*objects[j])->IsHeapObject()) {
-        MarkObject(HeapObject::cast(*objects[j]));
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        MarkBit mark = Marking::MarkBitFrom(heap_object);
+        MarkObject(heap_object, mark);
       }
     }
 
-    // Once the entire group has been marked, dispose it because it's
-    // not needed anymore.
+    // Once the entire group has been colored grey, set the object group
+    // to NULL so it won't be processed again.
     entry->Dispose();
+    object_groups->at(i) = NULL;
   }
   object_groups->Rewind(last);
 }
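
Both this loop and MarkImplicitRefGroups below use the same compact-in-place idiom: entries that must survive for a later round are slid to the front with a last cursor, processed entries are disposed, and Rewind trims the tail. The pattern in isolation, as a self-contained sketch:

#include <cstddef>
#include <vector>

// Shape of the MarkObjectGroups loop: keep some entries for later,
// consume the rest, then shrink the list (the Rewind(last) step).
template <class T, class KeepPred, class Consume>
void ProcessAndCompact(std::vector<T>* entries, KeepPred keep, Consume consume) {
  size_t last = 0;
  for (size_t i = 0; i < entries->size(); i++) {
    if (keep((*entries)[i])) {
      (*entries)[last++] = (*entries)[i];  // survivor: slide forward
    } else {
      consume((*entries)[i]);              // processed: not kept
    }
  }
  entries->resize(last);
}
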
@@ -1405,7 +1935,7 @@
     ImplicitRefGroup* entry = ref_groups->at(i);
     ASSERT(entry != NULL);
 
-    if (!(*entry->parent_)->IsMarked()) {
+    if (!IsMarked(*entry->parent_)) {
       (*ref_groups)[last++] = entry;
       continue;
     }
@@ -1414,7 +1944,9 @@
     // A parent object is marked, so mark all child heap objects.
     for (size_t j = 0; j < entry->length_; ++j) {
       if ((*children[j])->IsHeapObject()) {
-        MarkObject(HeapObject::cast(*children[j]));
+        HeapObject* child = HeapObject::cast(*children[j]);
+        MarkBit mark = Marking::MarkBitFrom(child);
+        MarkObject(child, mark);
       }
     }
 
@@ -1430,21 +1962,17 @@
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingStack() {
-  while (!marking_stack_.is_empty()) {
-    while (!marking_stack_.is_empty()) {
-      HeapObject* object = marking_stack_.Pop();
+void MarkCompactCollector::EmptyMarkingDeque() {
+  while (!marking_deque_.IsEmpty()) {
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* object = marking_deque_.Pop();
       ASSERT(object->IsHeapObject());
       ASSERT(heap()->Contains(object));
-      ASSERT(object->IsMarked());
-      ASSERT(!object->IsOverflowed());
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
 
-      // Because the object is marked, we have to recover the original map
-      // pointer and use it to mark the object's body.
-      MapWord map_word = object->map_word();
-      map_word.ClearMark();
-      Map* map = map_word.ToMap();
-      MarkObject(map);
+      Map* map = object->map();
+      MarkBit map_mark = Marking::MarkBitFrom(map);
+      MarkObject(map, map_mark);
 
       StaticMarkingVisitor::IterateBody(map, object);
     }
@@ -1461,39 +1989,45 @@
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the marking stack
 // is cleared.
-void MarkCompactCollector::RefillMarkingStack() {
-  ASSERT(marking_stack_.overflowed());
+void MarkCompactCollector::RefillMarkingDeque() {
+  ASSERT(marking_deque_.overflowed());
 
-  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
-  if (marking_stack_.is_full()) return;
+  SemiSpaceIterator new_it(heap()->new_space());
+  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
-                                    &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->old_pointer_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->old_data_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->code_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->map_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->cell_space());
+  if (marking_deque_.IsFull()) return;
 
-  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
-  if (marking_stack_.is_full()) return;
+  LargeObjectIterator lo_it(heap()->lo_space());
+  DiscoverGreyObjectsWithIterator(heap(),
+                                  &marking_deque_,
+                                  &lo_it);
+  if (marking_deque_.IsFull()) return;
 
-  marking_stack_.clear_overflowed();
+  marking_deque_.ClearOverflowed();
 }
 
 
@@ -1501,23 +2035,23 @@
 // stack.  Before: the marking stack contains zero or more heap object
 // pointers.  After: the marking stack is empty and there are no overflowed
 // objects in the heap.
-void MarkCompactCollector::ProcessMarkingStack() {
-  EmptyMarkingStack();
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+void MarkCompactCollector::ProcessMarkingDeque() {
+  EmptyMarkingDeque();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 }
 
 
 void MarkCompactCollector::ProcessExternalMarking() {
   bool work_to_do = true;
-  ASSERT(marking_stack_.is_empty());
+  ASSERT(marking_deque_.IsEmpty());
   while (work_to_do) {
     MarkObjectGroups();
     MarkImplicitRefGroups();
-    work_to_do = !marking_stack_.is_empty();
-    ProcessMarkingStack();
+    work_to_do = !marking_deque_.IsEmpty();
+    ProcessMarkingDeque();
   }
 }
 
@@ -1529,19 +2063,64 @@
   // with the C stack limit check.
   PostponeInterruptsScope postpone(heap()->isolate());
 
+  bool incremental_marking_overflowed = false;
+  IncrementalMarking* incremental_marking = heap_->incremental_marking();
+  if (was_marked_incrementally_) {
+    // Finalize the incremental marking and check whether we had an overflow.
+    // Both markers use the grey color to mark overflowed objects, so
+    // the non-incremental marker can deal with them as if the overflow
+    // occurred during normal marking.
+    // But the incremental marker uses a separate marking deque,
+    // so we have to explicitly copy its overflow state.
+    incremental_marking->Finalize();
+    incremental_marking_overflowed =
+        incremental_marking->marking_deque()->overflowed();
+    incremental_marking->marking_deque()->ClearOverflowed();
+  } else {
+    // Abort any pending incremental activities e.g. incremental sweeping.
+    incremental_marking->Abort();
+  }
+
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  // The to space contains live objects, the from space is used as a marking
-  // stack.
-  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
-                            heap()->new_space()->FromSpaceHigh());
+  // The to space contains live objects, a page in from space is used as a
+  // marking stack.
+  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
+  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
+  if (FLAG_force_marking_deque_overflows) {
+    marking_deque_end = marking_deque_start + 64 * kPointerSize;
+  }
+  marking_deque_.Initialize(marking_deque_start,
+                            marking_deque_end);
+  ASSERT(!marking_deque_.overflowed());
 
-  ASSERT(!marking_stack_.overflowed());
+  if (incremental_marking_overflowed) {
+    // There are overflowed objects left in the heap after incremental marking.
+    marking_deque_.SetOverflowed();
+  }
 
   PrepareForCodeFlushing();
 
+  if (was_marked_incrementally_) {
+    // There is no write barrier on cells, so we have to scan them now, at
+    // the end of incremental marking.
+    {
+      HeapObjectIterator cell_iterator(heap()->cell_space());
+      HeapObject* cell;
+      while ((cell = cell_iterator.Next()) != NULL) {
+        ASSERT(cell->IsJSGlobalPropertyCell());
+        if (IsMarked(cell)) {
+          int offset = JSGlobalPropertyCell::kValueOffset;
+          StaticMarkingVisitor::VisitPointer(
+              heap(),
+              reinterpret_cast<Object**>(cell->address() + offset));
+        }
+      }
+    }
+  }
+
   RootMarkingVisitor root_visitor(heap());
   MarkRoots(&root_visitor);
 
@@ -1560,15 +2139,20 @@
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 
   // Repeat host application specific marking to mark unmarked objects
   // reachable from the weak roots.
   ProcessExternalMarking();
 
+  AfterMarking();
+}
+
+
+void MarkCompactCollector::AfterMarking() {
   // Object literal map caches reference symbols (cache keys) and maps
   // (cache values). At this point still useful maps have already been
   // marked. Mark the keys for the alive values before we process the
@@ -1578,7 +2162,7 @@
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
-  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->symbol_table();
   SymbolTableCleaner v(heap());
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
@@ -1607,13 +2191,13 @@
   Object* raw_context = heap()->global_contexts_list_;
   while (raw_context != heap()->undefined_value()) {
     Context* context = reinterpret_cast<Context*>(raw_context);
-    if (context->IsMarked()) {
+    if (IsMarked(context)) {
       HeapObject* raw_map_cache =
           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
       // A map cache may be reachable from the stack. In this case
       // it's already transitively marked and it's too late to clean
       // up its parts.
-      if (!raw_map_cache->IsMarked() &&
+      if (!IsMarked(raw_map_cache) &&
           raw_map_cache != heap()->undefined_value()) {
         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
         int existing_elements = map_cache->NumberOfElements();
@@ -1623,17 +2207,16 @@
              i += MapCache::kEntrySize) {
           Object* raw_key = map_cache->get(i);
           if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->null_value()) continue;
+              raw_key == heap()->the_hole_value()) continue;
           STATIC_ASSERT(MapCache::kEntrySize == 2);
           Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() &&
-              HeapObject::cast(raw_map)->IsMarked()) {
+          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
             ++used_elements;
           } else {
             // Delete useless entries with unmarked maps.
             ASSERT(raw_map->IsMap());
-            map_cache->set_null_unchecked(heap(), i);
-            map_cache->set_null_unchecked(heap(), i + 1);
+            map_cache->set_the_hole(i);
+            map_cache->set_the_hole(i + 1);
           }
         }
         if (used_elements == 0) {
@@ -1643,64 +2226,38 @@
           // extra complexity during GC. We rely on subsequent cache
           // usages (EnsureCapacity) to do this.
           map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkObject(map_cache);
+          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
+          MarkObject(map_cache, map_cache_markbit);
         }
       }
     }
     // Move to next element in the list.
     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
   }
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
-#ifdef DEBUG
-void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
-  live_bytes_ += obj->Size();
-  if (heap()->new_space()->Contains(obj)) {
-    live_young_objects_size_ += obj->Size();
-  } else if (heap()->map_space()->Contains(obj)) {
-    ASSERT(obj->IsMap());
-    live_map_objects_size_ += obj->Size();
-  } else if (heap()->cell_space()->Contains(obj)) {
-    ASSERT(obj->IsJSGlobalPropertyCell());
-    live_cell_objects_size_ += obj->Size();
-  } else if (heap()->old_pointer_space()->Contains(obj)) {
-    live_old_pointer_objects_size_ += obj->Size();
-  } else if (heap()->old_data_space()->Contains(obj)) {
-    live_old_data_objects_size_ += obj->Size();
-  } else if (heap()->code_space()->Contains(obj)) {
-    live_code_objects_size_ += obj->Size();
-  } else if (heap()->lo_space()->Contains(obj)) {
-    live_lo_objects_size_ += obj->Size();
-  } else {
-    UNREACHABLE();
+void MarkCompactCollector::ReattachInitialMaps() {
+  HeapObjectIterator map_iterator(heap()->map_space());
+  for (HeapObject* obj = map_iterator.Next();
+       obj != NULL;
+       obj = map_iterator.Next()) {
+    if (obj->IsFreeSpace()) continue;
+    Map* map = Map::cast(obj);
+
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
+
+    if (map->attached_to_shared_function_info()) {
+      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
+    }
   }
 }
-#endif  // DEBUG
-
-
-void MarkCompactCollector::SweepLargeObjectSpace() {
-#ifdef DEBUG
-  ASSERT(state_ == MARK_LIVE_OBJECTS);
-  state_ =
-      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
-#endif
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  heap()->lo_space()->FreeUnmarkedObjects();
-}
-
-
-// Safe to use during marking phase only.
-bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
-  MapWord metamap = object->map_word();
-  metamap.ClearMark();
-  return metamap.ToMap()->instance_type() == MAP_TYPE;
-}
 
 
 void MarkCompactCollector::ClearNonLiveTransitions() {
-  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
+  HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  At the same time,
   // set all the prototype fields of maps back to their original value,
@@ -1711,17 +2268,19 @@
   // scan the descriptor arrays of those maps, not all maps.
   // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
-  for (HeapObject* obj = map_iterator.next();
-       obj != NULL; obj = map_iterator.next()) {
+  for (HeapObject* obj = map_iterator.Next();
+       obj != NULL; obj = map_iterator.Next()) {
     Map* map = reinterpret_cast<Map*>(obj);
-    if (!map->IsMarked() && map->IsByteArray()) continue;
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    if (map->IsFreeSpace()) continue;
 
-    ASSERT(SafeIsMap(map));
+    ASSERT(map->IsMap());
     // Only JSObject and subtypes have map transitions and back pointers.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
 
-    if (map->IsMarked() && map->attached_to_shared_function_info()) {
+    if (map_mark.Get() &&
+        map->attached_to_shared_function_info()) {
       // This map is used for inobject slack tracking and has been detached
       // from SharedFunctionInfo during the mark phase.
       // Since it survived the GC, reattach it now.
@@ -1730,52 +2289,55 @@
 
     // Clear dead prototype transitions.
     int number_of_transitions = map->NumberOfProtoTransitions();
-    if (number_of_transitions > 0) {
-      FixedArray* prototype_transitions =
-          map->unchecked_prototype_transitions();
-      int new_number_of_transitions = 0;
-      const int header = Map::kProtoTransitionHeaderSize;
-      const int proto_offset =
-          header + Map::kProtoTransitionPrototypeOffset;
-      const int map_offset = header + Map::kProtoTransitionMapOffset;
-      const int step = Map::kProtoTransitionElementsPerEntry;
-      for (int i = 0; i < number_of_transitions; i++) {
-        Object* prototype = prototype_transitions->get(proto_offset + i * step);
-        Object* cached_map = prototype_transitions->get(map_offset + i * step);
-        if (HeapObject::cast(prototype)->IsMarked() &&
-            HeapObject::cast(cached_map)->IsMarked()) {
-          if (new_number_of_transitions != i) {
-            prototype_transitions->set_unchecked(
-                heap_,
-                proto_offset + new_number_of_transitions * step,
-                prototype,
-                UPDATE_WRITE_BARRIER);
-            prototype_transitions->set_unchecked(
-                heap_,
-                map_offset + new_number_of_transitions * step,
-                cached_map,
-                SKIP_WRITE_BARRIER);
-          }
-          new_number_of_transitions++;
+    FixedArray* prototype_transitions = map->prototype_transitions();
+
+    int new_number_of_transitions = 0;
+    const int header = Map::kProtoTransitionHeaderSize;
+    const int proto_offset =
+        header + Map::kProtoTransitionPrototypeOffset;
+    const int map_offset = header + Map::kProtoTransitionMapOffset;
+    const int step = Map::kProtoTransitionElementsPerEntry;
+    for (int i = 0; i < number_of_transitions; i++) {
+      Object* prototype = prototype_transitions->get(proto_offset + i * step);
+      Object* cached_map = prototype_transitions->get(map_offset + i * step);
+      if (IsMarked(prototype) && IsMarked(cached_map)) {
+        if (new_number_of_transitions != i) {
+          prototype_transitions->set_unchecked(
+              heap_,
+              proto_offset + new_number_of_transitions * step,
+              prototype,
+              UPDATE_WRITE_BARRIER);
+          prototype_transitions->set_unchecked(
+              heap_,
+              map_offset + new_number_of_transitions * step,
+              cached_map,
+              SKIP_WRITE_BARRIER);
         }
       }
 
       // Fill slots that became free with undefined value.
-      Object* undefined = heap()->raw_unchecked_undefined_value();
+      Object* undefined = heap()->undefined_value();
       for (int i = new_number_of_transitions * step;
            i < number_of_transitions * step;
            i++) {
+        // The undefined object is on a page that is never compacted and never
+        // in new space so it is OK to skip the write barrier.  Also it's a
+        // root.
         prototype_transitions->set_unchecked(heap_,
                                              header + i,
                                              undefined,
                                              SKIP_WRITE_BARRIER);
+
+        Object** undefined_slot =
+            prototype_transitions->data_start() + i;
+        RecordSlot(undefined_slot, undefined_slot, undefined);
       }
       map->SetNumberOfProtoTransitions(new_number_of_transitions);
     }
 
     // Follow the chain of back pointers to find the prototype.
     Map* current = map;
-    while (SafeIsMap(current)) {
+    while (current->IsMap()) {
       current = reinterpret_cast<Map*>(current->prototype());
       ASSERT(current->IsHeapObject());
     }
@@ -1784,21 +2346,28 @@
     // Follow back pointers, setting them to prototype,
     // clearing map transitions when necessary.
     current = map;
-    bool on_dead_path = !current->IsMarked();
+    bool on_dead_path = !map_mark.Get();
     Object* next;
-    while (SafeIsMap(current)) {
+    while (current->IsMap()) {
       next = current->prototype();
       // There should never be a dead map above a live map.
-      ASSERT(on_dead_path || current->IsMarked());
+      MarkBit current_mark = Marking::MarkBitFrom(current);
+      bool is_alive = current_mark.Get();
+      ASSERT(on_dead_path || is_alive);
 
       // A live map above a dead map indicates a dead transition.
       // This test will always be false on the first iteration.
-      if (on_dead_path && current->IsMarked()) {
+      if (on_dead_path && is_alive) {
         on_dead_path = false;
         current->ClearNonLiveTransitions(heap(), real_prototype);
       }
       *HeapObject::RawField(current, Map::kPrototypeOffset) =
           real_prototype;
+
+      if (is_alive) {
+        Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+        RecordSlot(slot, slot, real_prototype);
+      }
       current = reinterpret_cast<Map*>(next);
     }
   }
@@ -1808,13 +2377,13 @@
 void MarkCompactCollector::ProcessWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
-    ObjectHashTable* table = weak_map->unchecked_table();
+    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
     for (int i = 0; i < table->Capacity(); i++) {
-      if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
         Object* value = table->get(table->EntryToValueIndex(i));
-        StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
+        StaticMarkingVisitor::VisitPointer(heap(), &value);
         table->set_unchecked(heap(),
                              table->EntryToValueIndex(i),
                              value,
@@ -1829,12 +2398,12 @@
 void MarkCompactCollector::ClearWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
-    ObjectHashTable* table = weak_map->unchecked_table();
+    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
     for (int i = 0; i < table->Capacity(); i++) {
-      if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
-        table->RemoveEntry(i, heap());
+      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+        table->RemoveEntry(i);
       }
     }
     weak_map_obj = weak_map->next();
@@ -1843,316 +2412,97 @@
   set_encountered_weak_maps(Smi::FromInt(0));
 }
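
ProcessWeakMaps and ClearWeakMaps together implement the weak-map rule: a value stays alive only while its key is marked, and entries whose keys died are removed before sweeping. One round of that rule over a single table, as a self-contained model (the collector interleaves this with the rest of marking, and marking a value here stands in for visiting its pointer):

#include <map>

struct Obj { bool marked; };

// One ProcessWeakMaps/ClearWeakMaps round: mark values of live keys,
// then erase entries whose key is dead.
static void ProcessAndClearWeakMap(std::map<Obj*, Obj*>* table) {
  for (std::map<Obj*, Obj*>::iterator it = table->begin();
       it != table->end();) {
    if (it->first->marked) {
      it->second->marked = true;  // ProcessWeakMaps: value kept alive
      ++it;
    } else {
      table->erase(it++);         // ClearWeakMaps: dead key, drop the entry
    }
  }
}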
 
-// -------------------------------------------------------------------------
-// Phase 2: Encode forwarding addresses.
-// When compacting, forwarding addresses for objects in old space and map
-// space are encoded in their map pointer word (along with an encoding of
-// their map pointers).
-//
-// The excact encoding is described in the comments for class MapWord in
-// objects.h.
-//
-// An address range [start, end) can have both live and non-live objects.
-// Maximal non-live regions are marked so they can be skipped on subsequent
-// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
-// free regions of one-word size (in which case the next word is the start
-// of a live object).  A second distinguished map-pointer encoding is used
-// to mark free regions larger than one word, and the size of the free
-// region (including the first word) is written to the second word of the
-// region.
-//
-// Any valid map page offset must lie in the object area of the page, so map
-// page offsets less than Page::kObjectStartOffset are invalid.  We use a
-// pair of distinguished invalid map encodings (for single word and multiple
-// words) to indicate free regions in the page found during computation of
-// forwarding addresses and skipped over in subsequent sweeps.
-
-
-// Encode a free region, defined by the given start address and size, in the
-// first word or two of the region.
-void EncodeFreeRegion(Address free_start, int free_size) {
-  ASSERT(free_size >= kIntSize);
-  if (free_size == kIntSize) {
-    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
-  } else {
-    ASSERT(free_size >= 2 * kIntSize);
-    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
-    Memory::int_at(free_start + kIntSize) = free_size;
-  }
-
-#ifdef DEBUG
-  // Zap the body of the free region.
-  if (FLAG_enable_slow_asserts) {
-    for (int offset = 2 * kIntSize;
-         offset < free_size;
-         offset += kPointerSize) {
-      Memory::Address_at(free_start + offset) = kZapValue;
-    }
-  }
-#endif
-}
-
-
-// Try to promote all objects in new space.  Heap numbers and sequential
-// strings are promoted to the code space, large objects to large object space,
-// and all others to the old space.
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
-                                           HeapObject* object,
-                                           int object_size) {
-  MaybeObject* forwarded;
-  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
-    forwarded = Failure::Exception();
-  } else {
-    OldSpace* target_space = heap->TargetSpace(object);
-    ASSERT(target_space == heap->old_pointer_space() ||
-           target_space == heap->old_data_space());
-    forwarded = target_space->MCAllocateRaw(object_size);
-  }
-  Object* result;
-  if (!forwarded->ToObject(&result)) {
-    result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
-  }
-  return result;
-}
-
-
-// Allocation functions for the paged spaces call the space's MCAllocateRaw.
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
-    Heap *heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->old_pointer_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->old_data_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->code_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->map_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
-    Heap* heap, HeapObject* ignore, int object_size) {
-  return heap->cell_space()->MCAllocateRaw(object_size);
-}
-
-
-// The forwarding address is encoded at the same offset as the current
-// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(Heap* heap,
-                                              HeapObject* old_object,
-                                              int object_size,
-                                              Object* new_object,
-                                              int* ignored) {
-  int offset =
-      heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
-  Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
-      HeapObject::cast(new_object)->address();
-}
-
-
-// The forwarding address is encoded in the map pointer of the object as an
-// offset (in terms of live bytes) from the address of the first live object
-// in the page.
-inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
-                                                HeapObject* old_object,
-                                                int object_size,
-                                                Object* new_object,
-                                                int* offset) {
-  // Record the forwarding address of the first live object if necessary.
-  if (*offset == 0) {
-    Page::FromAddress(old_object->address())->mc_first_forwarded =
-        HeapObject::cast(new_object)->address();
-  }
-
-  MapWord encoding =
-      MapWord::EncodeAddress(old_object->map()->address(), *offset);
-  old_object->set_map_word(encoding);
-  *offset += object_size;
-  ASSERT(*offset <= Page::kObjectAreaSize);
-}
-
-
-// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
-
-
-// Function template that, given a range of addresses (eg, a semispace or a
-// paged space page), iterates through the objects in the range to clear
-// mark bits and compute and encode forwarding addresses.  As a side effect,
-// maximal free chunks are marked so that they can be skipped on subsequent
-// sweeps.
-//
-// The template parameters are an allocation function, a forwarding address
-// encoding function, and a function to process non-live objects.
-template<MarkCompactCollector::AllocationFunction Alloc,
-         MarkCompactCollector::EncodingFunction Encode,
-         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
-                                             Address start,
-                                             Address end,
-                                             int* offset) {
-  // The start address of the current free region while sweeping the space.
-  // This address is set when a transition from live to non-live objects is
-  // encountered.  A value (an encoding of the 'next free region' pointer)
-  // is written to memory at this address when a transition from non-live to
-  // live objects is encountered.
-  Address free_start = NULL;
-
-  // A flag giving the state of the previously swept object.  Initially true
-  // to ensure that free_start is initialized to a proper address before
-  // trying to write to it.
-  bool is_prev_alive = true;
-
-  int object_size;  // Will be set on each iteration of the loop.
-  for (Address current = start; current < end; current += object_size) {
-    HeapObject* object = HeapObject::FromAddress(current);
-    if (object->IsMarked()) {
-      object->ClearMark();
-      collector->tracer()->decrement_marked_count();
-      object_size = object->Size();
-
-      Object* forwarded =
-          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
-      Encode(collector->heap(), object, object_size, forwarded, offset);
-
-#ifdef DEBUG
-      if (FLAG_gc_verbose) {
-        PrintF("forward %p -> %p.\n", object->address(),
-               HeapObject::cast(forwarded)->address());
-      }
-#endif
-      if (!is_prev_alive) {  // Transition from non-live to live.
-        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
-        is_prev_alive = true;
-      }
-    } else {  // Non-live object.
-      object_size = object->Size();
-      ProcessNonLive(object, collector->heap()->isolate());
-      if (is_prev_alive) {  // Transition from live to non-live.
-        free_start = current;
-        is_prev_alive = false;
-      }
-      LiveObjectList::ProcessNonLive(object);
-    }
-  }
-
-  // If we ended on a free region, mark it.
-  if (!is_prev_alive) {
-    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
-  }
-}
-
-
-// Functions to encode the forwarding pointers in each compactable space.
-void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
-  int ignored;
-  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
-                                   EncodeForwardingAddressInNewSpace,
-                                   IgnoreNonLiveObject>(
-      this,
-      heap()->new_space()->bottom(),
-      heap()->new_space()->top(),
-      &ignored);
-}
-
-
-template<MarkCompactCollector::AllocationFunction Alloc,
-         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
-    PagedSpace* space) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-
-    // The offset of each live object in the page from the first live object
-    // in the page.
-    int offset = 0;
-    EncodeForwardingAddressesInRange<Alloc,
-                                     EncodeForwardingAddressInPagedSpace,
-                                     ProcessNonLive>(
-        this,
-        p->ObjectAreaStart(),
-        p->AllocationTop(),
-        &offset);
-  }
-}
-
 
 // We scavenge new space simultaneously with sweeping. This is done in two
 // passes.
+//
 // The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwading address is written directly into
-// first word of object without any encoding. If object is dead we are writing
+// promotes them to old space.  The forwarding address is written directly
+// into the first word of the object without any encoding.  If the object is
+// dead, we write NULL as the forwarding address.
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
-static void MigrateObject(Heap* heap,
-                          Address dst,
-                          Address src,
-                          int size,
-                          bool to_old_space) {
-  if (to_old_space) {
-    heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
-  } else {
-    heap->CopyBlock(dst, src, size);
-  }
+//
+// The second pass updates pointers to new space in all spaces.  It is possible
+// to encounter pointers to dead new space objects during traversal of pointers
+// to new space.  We should clear them to avoid encountering them during the
+// next pointer iteration.  This is an issue if the store buffer overflows and
+// we have to scan the entire old space, including dead objects, looking for
+// pointers to new space.
+void MarkCompactCollector::MigrateObject(Address dst,
+                                         Address src,
+                                         int size,
+                                         AllocationSpace dest) {
+  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+    Address src_slot = src;
+    Address dst_slot = dst;
+    ASSERT(IsAligned(size, kPointerSize));
 
+    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+      Object* value = Memory::Object_at(src_slot);
+
+      Memory::Object_at(dst_slot) = value;
+
+      if (heap_->InNewSpace(value)) {
+        heap_->store_buffer()->Mark(dst_slot);
+      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                           &migration_slots_buffer_,
+                           reinterpret_cast<Object**>(dst_slot),
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+
+      src_slot += kPointerSize;
+      dst_slot += kPointerSize;
+    }
+
+    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
+      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+      Address code_entry = Memory::Address_at(code_entry_slot);
+
+      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                           &migration_slots_buffer_,
+                           SlotsBuffer::CODE_ENTRY_SLOT,
+                           code_entry_slot,
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+    }
+  } else if (dest == CODE_SPACE) {
+    PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
+    heap()->MoveBlock(dst, src, size);
+    SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                       &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT,
+                       dst,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+  } else {
+    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+    heap()->MoveBlock(dst, src, size);
+  }
   Memory::Address_at(src) = dst;
 }
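
MigrateObject ends by planting the forwarding address in the old object's first word, but before that it classifies every copied slot: a slot whose target is in new space goes into the store buffer, and a slot whose target sits on an evacuation candidate goes into the migration slots buffer. The same decision in isolation (illustrative enum, not V8 types):

enum SlotRecording {
  RECORD_IN_STORE_BUFFER,   // old->new pointer
  RECORD_IN_SLOTS_BUFFER,   // target will move during compaction
  RECORD_NOWHERE            // stable target
};

// Where must a just-copied slot be remembered so it can be found again?
SlotRecording ClassifyCopiedSlot(bool target_in_new_space,
                                 bool target_on_evacuation_candidate) {
  if (target_in_new_space) return RECORD_IN_STORE_BUFFER;
  if (target_on_evacuation_candidate) return RECORD_IN_SLOTS_BUFFER;
  return RECORD_NOWHERE;
}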
 
 
-class StaticPointersToNewGenUpdatingVisitor : public
-  StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
- public:
-  static inline void VisitPointer(Heap* heap, Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    HeapObject* obj = HeapObject::cast(*p);
-    Address old_addr = obj->address();
-
-    if (heap->new_space()->Contains(obj)) {
-      ASSERT(heap->InFromSpace(*p));
-      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
-    }
-  }
-};
-
-
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
-class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
+class PointersUpdatingVisitor: public ObjectVisitor {
  public:
-  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
 
   void VisitPointer(Object** p) {
-    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
+    UpdatePointer(p);
   }
 
   void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) {
-      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
-    }
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    VisitPointer(&target);
+    rinfo->set_target_object(target);
   }
 
   void VisitCodeTarget(RelocInfo* rinfo) {
@@ -2172,68 +2522,96 @@
     rinfo->set_call_address(Code::cast(target)->instruction_start());
   }
 
+  static inline void UpdateSlot(Heap* heap, Object** slot) {
+    Object* obj = *slot;
+
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_obj = HeapObject::cast(obj);
+
+    MapWord map_word = heap_obj->map_word();
+    if (map_word.IsForwardingAddress()) {
+      ASSERT(heap->InFromSpace(heap_obj) ||
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+      HeapObject* target = map_word.ToForwardingAddress();
+      *slot = target;
+      ASSERT(!heap->InFromSpace(target) &&
+             !MarkCompactCollector::IsOnEvacuationCandidate(target));
+    }
+  }
+
  private:
+  inline void UpdatePointer(Object** p) {
+    UpdateSlot(heap_, p);
+  }
+
   Heap* heap_;
 };
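
UpdateSlot is the consumer of the map-word protocol: an evacuated object's map word holds a forwarding address, so any slot still pointing at the old location is patched in place. A hedged sketch of how the visitor is driven; the exact call sites come later in this file, and Heap::IterateRoots with the VISIT_ALL_IN_SWEEP_NEWSPACE mode is an assumption here:

// Hedged usage sketch: after evacuation, rewrite every root slot that
// still points into from-space or onto an evacuated page.
void UpdateRootsAfterEvacuation(Heap* heap) {
  PointersUpdatingVisitor updating_visitor(heap);
  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  // Slots recorded in migration_slots_buffer_ are patched separately.
}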
 
 
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It can encounter pointers to dead objects in new space when traversing map
-// space (see comment for MigrateObject).
-static void UpdatePointerToNewGen(HeapObject** p) {
-  if (!(*p)->IsHeapObject()) return;
+static void UpdatePointer(HeapObject** p, HeapObject* object) {
+  ASSERT(*p == object);
 
-  Address old_addr = (*p)->address();
-  ASSERT(HEAP->InFromSpace(*p));
+  Address old_addr = object->address();
 
   Address new_addr = Memory::Address_at(old_addr);
 
-  if (new_addr == NULL) {
-    // We encountered pointer to a dead object. Clear it so we will
-    // not visit it again during next iteration of dirty regions.
-    *p = NULL;
-  } else {
+  // The new space sweep will overwrite the map word of dead objects
+  // with NULL. In this case we do not need to transfer this entry to
+  // the store buffer which we are rebuilding.
+  if (new_addr != NULL) {
     *p = HeapObject::FromAddress(new_addr);
+  } else {
+    // We have to zap this pointer, because the store buffer may overflow later,
+    // and then we have to scan the entire heap, and we don't want to find
+    // spurious new space pointers in the old space.
+    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
   }
 }
 
 
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
-                                                                 Object** p) {
-  Address old_addr = HeapObject::cast(*p)->address();
-  Address new_addr = Memory::Address_at(old_addr);
-  return String::cast(HeapObject::FromAddress(new_addr));
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+                                                         Object** p) {
+  MapWord map_word = HeapObject::cast(*p)->map_word();
+
+  if (map_word.IsForwardingAddress()) {
+    return String::cast(map_word.ToForwardingAddress());
+  }
+
+  return String::cast(*p);
 }
 
 
-static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+                                            int object_size) {
   Object* result;
 
-  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
+  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
     MaybeObject* maybe_result =
-        heap->lo_space()->AllocateRawFixedArray(object_size);
+        heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(heap, target->address(), object->address(), object_size,
-                    true);
-      heap->mark_compact_collector()->tracer()->
+      MigrateObject(target->address(),
+                    object->address(),
+                    object_size,
+                    LO_SPACE);
+      heap()->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
   } else {
-    OldSpace* target_space = heap->TargetSpace(object);
+    OldSpace* target_space = heap()->TargetSpace(object);
 
-    ASSERT(target_space == heap->old_pointer_space() ||
-           target_space == heap->old_data_space());
+    ASSERT(target_space == heap()->old_pointer_space() ||
+           target_space == heap()->old_data_space());
     MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(heap,
-                    target->address(),
+      MigrateObject(target->address(),
                     object->address(),
                     object_size,
-                    target_space == heap->old_pointer_space());
-      heap->mark_compact_collector()->tracer()->
+                    target_space->identity());
+      heap()->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
@@ -2243,84 +2621,558 @@
 }
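
The routing above is two-tiered: anything larger than Page::kMaxNonCodeHeapObjectSize must be promoted into the large-object space, and everything else goes to whichever old space Heap::TargetSpace picks (roughly, pointer-bearing objects to old pointer space, raw data such as sequential strings and heap numbers to old data space). The same decision in isolation, with illustrative types:

enum PromotionTarget { TO_LO_SPACE, TO_OLD_POINTER_SPACE, TO_OLD_DATA_SPACE };

// Mirrors the TryPromoteObject routing above; max_regular_size stands in
// for Page::kMaxNonCodeHeapObjectSize.
PromotionTarget ChoosePromotionTarget(int object_size,
                                      int max_regular_size,
                                      bool contains_pointers) {
  if (object_size > max_regular_size) return TO_LO_SPACE;
  return contains_pointers ? TO_OLD_POINTER_SPACE : TO_OLD_DATA_SPACE;
}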
 
 
-static void SweepNewSpace(Heap* heap, NewSpace* space) {
-  heap->CheckNewSpaceExpansionCriteria();
+void MarkCompactCollector::EvacuateNewSpace() {
+  heap()->CheckNewSpaceExpansionCriteria();
 
-  Address from_bottom = space->bottom();
-  Address from_top = space->top();
+  NewSpace* new_space = heap()->new_space();
+
+  // Store allocation range before flipping semispaces.
+  Address from_bottom = new_space->bottom();
+  Address from_top = new_space->top();
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  space->Flip();
-  space->ResetAllocationInfo();
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
 
-  int size = 0;
   int survivors_size = 0;
 
   // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses.
-  for (Address current = from_bottom; current < from_top; current += size) {
-    HeapObject* object = HeapObject::FromAddress(current);
-
-    if (object->IsMarked()) {
-      object->ClearMark();
-      heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
-      size = object->Size();
+  // migrate live objects and write forwarding addresses.  This stage puts
+  // new entries in the store buffer and may cause some pages to be marked
+  // scan-on-scavenge.
+  SemiSpaceIterator from_it(from_bottom, from_top);
+  for (HeapObject* object = from_it.Next();
+       object != NULL;
+       object = from_it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      // Don't bother decrementing the live bytes count. We'll discard the
+      // entire page at the end.
+      int size = object->Size();
       survivors_size += size;
 
       // Aggressively promote young survivors to the old space.
-      if (TryPromoteObject(heap, object, size)) {
+      if (TryPromoteObject(object, size)) {
         continue;
       }
 
       // Promotion failed. Just migrate object to another semispace.
-      // Allocation cannot fail at this point: semispaces are of equal size.
-      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
+      MaybeObject* allocation = new_space->AllocateRaw(size);
+      if (allocation->IsFailure()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room.
+          UNREACHABLE();
+        }
+        allocation = new_space->AllocateRaw(size);
+        ASSERT(!allocation->IsFailure());
+      }
+      Object* target = allocation->ToObjectUnchecked();
 
-      MigrateObject(heap,
-                    HeapObject::cast(target)->address(),
-                    current,
+      MigrateObject(HeapObject::cast(target)->address(),
+                    object->address(),
                     size,
-                    false);
+                    NEW_SPACE);
     } else {
       // Process the dead object before we write a NULL into its header.
       LiveObjectList::ProcessNonLive(object);
 
-      size = object->Size();
-      Memory::Address_at(current) = NULL;
+      // Mark dead objects in the new space with null in their map field.
+      Memory::Address_at(object->address()) = NULL;
     }
   }
 
-  // Second pass: find pointers to new space and update them.
-  PointersToNewGenUpdatingVisitor updating_visitor(heap);
+  heap_->IncrementYoungSurvivorsCounter(survivors_size);
+  new_space->set_age_mark(new_space->top());
+}
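+
+// A note on the mechanics above: MigrateObject leaves a forwarding address
+// in the map word of every evacuated object, so a stale reference can later
+// be resolved with the usual pattern:
+//
+//   MapWord map_word = object->map_word();
+//   if (map_word.IsForwardingAddress()) {
+//     object = map_word.ToForwardingAddress();
+//   }
+//
+// EvacuationWeakObjectRetainer below relies on exactly this pattern.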
 
-  // Update pointers in to space.
-  Address current = space->bottom();
-  while (current < space->top()) {
-    HeapObject* object = HeapObject::FromAddress(current);
-    current +=
-        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
-                                                           object);
+
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+  AlwaysAllocateScope always_allocate;
+  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptPrecisely();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
+
+  int offsets[16];
+
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(cell_base))));
+    if (cells[cell_index] == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+
+      MaybeObject* target = space->AllocateRaw(size);
+      if (target->IsFailure()) {
+        // OS refused to give us memory.
+        V8::FatalProcessOutOfMemory("Evacuation");
+        return;
+      }
+
+      Object* target_object = target->ToObjectUnchecked();
+
+      MigrateObject(HeapObject::cast(target_object)->address(),
+                    object_addr,
+                    size,
+                    space->identity());
+      ASSERT(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    cells[cell_index] = 0;
+  }
+  p->ResetLiveBytes();
+}
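+
+// Note on the offsets[16] buffers used during evacuation and sweeping: each
+// mark-bit cell covers 32 words, and since live objects are at least 2 words
+// long a single cell can describe at most 16 object starts.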
+
+
+void MarkCompactCollector::EvacuatePages() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    ASSERT(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    if (p->IsEvacuationCandidate()) {
+      // During compaction we might have to request a new page.
+      // Check that the space still has room for that.
+      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
+        EvacuateLiveObjectsFromPage(p);
+      } else {
+        // Without room for expansion, evacuation is not guaranteed to succeed.
+        // Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          Page* page = evacuation_candidates_[j];
+          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
+          page->ClearEvacuationCandidate();
+          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+        return;
+      }
+    }
+  }
+}
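+
+// Abandoned candidates keep their live objects in place: the
+// RESCAN_ON_EVACUATION flag makes EvacuateNewSpaceAndCandidates sweep those
+// pages in place and re-visit their slots instead of evacuating them.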
+
+
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (object->IsHeapObject()) {
+      HeapObject* heap_object = HeapObject::cast(object);
+      MapWord map_word = heap_object->map_word();
+      if (map_word.IsForwardingAddress()) {
+        return map_word.ToForwardingAddress();
+      }
+    }
+    return object;
+  }
+};
+
+
+static inline void UpdateSlot(ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type,
+                              Address addr) {
+  switch (slot_type) {
+    case SlotsBuffer::CODE_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+      rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::CODE_ENTRY_SLOT: {
+      v->VisitCodeEntry(addr);
+      break;
+    }
+    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+      HeapObject* obj = HeapObject::FromAddress(addr);
+      Code::cast(obj)->CodeIterateBody(v);
+      break;
+    }
+    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::JS_RETURN_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
+      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+      rinfo.Visit(v);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+enum SweepingMode {
+  SWEEP_ONLY,
+  SWEEP_AND_VISIT_LIVE_OBJECTS
+};
+
+
+enum SkipListRebuildingMode {
+  REBUILD_SKIP_LIST,
+  IGNORE_SKIP_LIST
+};
+
+
+// Sweep a space precisely.  After this has been done the space can
+// be iterated precisely, hitting only the live objects.  Code space
+// is always swept precisely because we want to be able to iterate
+// over it.  Map space is swept precisely, because it is not compacted.
+// Slots in live objects pointing into evacuation candidates are updated
+// if requested.
+template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+static void SweepPrecisely(PagedSpace* space,
+                           Page* p,
+                           ObjectVisitor* v) {
+  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
+            space->identity() == CODE_SPACE);
+  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptPrecisely();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address free_start = p->area_start();
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(free_start)));
+
+  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  Address object_address = free_start;
+  int offsets[16];
+
+  SkipList* skip_list = p->skip_list();
+  int curr_region = -1;
+  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+    skip_list->Clear();
   }
 
-  // Update roots.
-  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-  LiveObjectList::IterateElements(&updating_visitor);
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, object_address += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(object_address))));
+    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+    int live_index = 0;
+    for ( ; live_objects != 0; live_objects--) {
+      Address free_end = object_address + offsets[live_index++] * kPointerSize;
+      if (free_end != free_start) {
+        space->Free(free_start, static_cast<int>(free_end - free_start));
+      }
+      HeapObject* live_object = HeapObject::FromAddress(free_end);
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+      Map* map = live_object->map();
+      int size = live_object->SizeFromMap(map);
+      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+        live_object->IterateBody(map->instance_type(), size, v);
+      }
+      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+        int new_region_start =
+            SkipList::RegionNumber(free_end);
+        int new_region_end =
+            SkipList::RegionNumber(free_end + size - kPointerSize);
+        if (new_region_start != curr_region ||
+            new_region_end != curr_region) {
+          skip_list->AddObject(free_end, size);
+          curr_region = new_region_end;
+        }
+      }
+      free_start = free_end + size;
+    }
+    // Clear marking bits for current cell.
+    cells[cell_index] = 0;
+  }
+  if (free_start != p->area_end()) {
+    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
+  }
+  p->ResetLiveBytes();
+}
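+
+// Precise sweeping turns every gap between two consecutive live objects into
+// exactly one free-list entry: [free_start, free_end) is freed in front of
+// each live object and the tail [free_start, area_end()) is freed after the
+// loop, leaving the page iterable object by object.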
 
-  // Update pointers in old spaces.
-  heap->IterateDirtyRegions(heap->old_pointer_space(),
-                            &Heap::IteratePointersInDirtyRegion,
-                            &UpdatePointerToNewGen,
-                            heap->WATERMARK_SHOULD_BE_VALID);
 
-  heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() ||
+      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
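+
+// The mask arithmetic above sets every bit from the start bit through the
+// end bit inclusive.  For example, with the start bit at index 3 and the end
+// bit at index 6 in the same cell:
+//
+//   start_mask = ~(0x08 - 1)     == 0xfffffff8  (bits 3..31)
+//   end_mask   = (0x40 << 1) - 1 == 0x0000007f  (bits 0..6)
+//   start_mask & end_mask        == 0x00000078  (bits 3..6)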
+
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects, thus we can safely go
+  // to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space the only non-zero mark bits are on evacuation candidates
+  // (where we do not record any slots) and under invalidated code objects.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    ASSERT(compacting_);
+
+    // If the object is white then no slots have been recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}
+
+
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  bool code_slots_filtering_required;
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+    code_slots_filtering_required = MarkInvalidatedCode();
+
+    EvacuateNewSpace();
+  }
+
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuatePages();
+  }
+
+  // Second pass: find pointers to new space and update them.
+  PointersUpdatingVisitor updating_visitor(heap());
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    // Update pointers in to space.
+    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+                            heap()->new_space()->top());
+    for (HeapObject* object = to_it.Next();
+         object != NULL;
+         object = to_it.Next()) {
+      Map* map = object->map();
+      object->IterateBody(map->instance_type(),
+                          object->SizeFromMap(map),
+                          &updating_visitor);
+    }
+  }
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
+    // Update roots.
+    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+    LiveObjectList::IterateElements(&updating_visitor);
+  }
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
+    StoreBufferRebuildScope scope(heap_,
+                                  heap_->store_buffer(),
+                                  &Heap::ScavengeStoreBufferCallback);
+    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+  }
+
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                       migration_slots_buffer_,
+                                       code_slots_filtering_required);
+    if (FLAG_trace_fragmentation) {
+      PrintF("  migration slots buffer: %d\n",
+             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+    }
+
+    if (compacting_ && was_marked_incrementally_) {
+      // It's difficult to filter out slots recorded for large objects.
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        // LargeObjectSpace is not swept yet, thus we have to skip dead
+        // objects explicitly.
+        if (!IsMarked(obj)) continue;
+
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          obj->Iterate(&updating_visitor);
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+
+  int npages = evacuation_candidates_.length();
+  { GCTracer::Scope gc_scope(
+      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      ASSERT(p->IsEvacuationCandidate() ||
+             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+
+      if (p->IsEvacuationCandidate()) {
+        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                           p->slots_buffer(),
+                                           code_slots_filtering_required);
+        if (FLAG_trace_fragmentation) {
+          PrintF("  page %p slots buffer: %d\n",
+                 reinterpret_cast<void*>(p),
+                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
+        }
+
+        // Important: the skip list should be cleared only after the roots
+        // were updated, because root iteration traverses the stack and might
+        // have to find code objects from a non-updated pc pointing into an
+        // evacuation candidate.
+        SkipList* list = p->skip_list();
+        if (list != NULL) list->Clear();
+      } else {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+        switch (space->identity()) {
+          case OLD_DATA_SPACE:
+            SweepConservatively(space, p);
+            break;
+          case OLD_POINTER_SPACE:
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+                space, p, &updating_visitor);
+            break;
+          case CODE_SPACE:
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
+                space, p, &updating_visitor);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
 
   // Update pointers from cells.
-  HeapObjectIterator cell_iterator(heap->cell_space());
-  for (HeapObject* cell = cell_iterator.next();
+  HeapObjectIterator cell_iterator(heap_->cell_space());
+  for (HeapObject* cell = cell_iterator.Next();
        cell != NULL;
-       cell = cell_iterator.next()) {
+       cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -2330,1020 +3182,567 @@
   }
 
   // Update pointer from the global contexts list.
-  updating_visitor.VisitPointer(heap->global_contexts_list_address());
+  updating_visitor.VisitPointer(heap_->global_contexts_list_address());
+
+  heap_->symbol_table()->Iterate(&updating_visitor);
 
   // Update pointers from external string table.
-  heap->UpdateNewSpaceReferencesInExternalStringTable(
-      &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
-  // All pointers were updated. Update auxiliary allocation info.
-  heap->IncrementYoungSurvivorsCounter(survivors_size);
-  space->set_age_mark(space->top());
+  heap_->UpdateReferencesInExternalStringTable(
+      &UpdateReferenceInExternalStringTableEntry);
 
   // Update JSFunction pointers from the runtime profiler.
-  heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+      &updating_visitor);
+
+  EvacuationWeakObjectRetainer evacuation_object_retainer;
+  heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    VerifyEvacuation(heap_);
+  }
+#endif
+
+  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
+  ASSERT(migration_slots_buffer_ == NULL);
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    space->Free(p->area_start(), p->area_size());
+    p->set_scan_on_scavenge(false);
+    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+    p->ClearEvacuationCandidate();
+  }
+  evacuation_candidates_.Rewind(0);
+  compacting_ = false;
 }
 
 
-static void SweepSpace(Heap* heap, PagedSpace* space) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
+static const int kStartTableEntriesPerLine = 5;
+static const int kStartTableLines = 171;
+static const int kStartTableInvalidLine = 127;
+static const int kStartTableUnusedEntry = 126;
 
-  // During sweeping of paged space we are trying to find longest sequences
-  // of pages without live objects and free them (instead of putting them on
-  // the free list).
+#define _ kStartTableUnusedEntry
+#define X kStartTableInvalidLine
+// Mark-bit to object start offset table.
+//
+// Each line is indexed by the mark bits in a byte.  The first number on a
+// line is the number of live object starts for that byte and the other
+// numbers on the line are the offsets (in words) of those object starts.
+//
+// Since objects are at least 2 words long we don't have entries for two
+// consecutive 1 bits.  All byte values above 170 have at least 2 consecutive
+// set bits.
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
+  0, _, _, _, _,  // 0
+  1, 0, _, _, _,  // 1
+  1, 1, _, _, _,  // 2
+  X, _, _, _, _,  // 3
+  1, 2, _, _, _,  // 4
+  2, 0, 2, _, _,  // 5
+  X, _, _, _, _,  // 6
+  X, _, _, _, _,  // 7
+  1, 3, _, _, _,  // 8
+  2, 0, 3, _, _,  // 9
+  2, 1, 3, _, _,  // 10
+  X, _, _, _, _,  // 11
+  X, _, _, _, _,  // 12
+  X, _, _, _, _,  // 13
+  X, _, _, _, _,  // 14
+  X, _, _, _, _,  // 15
+  1, 4, _, _, _,  // 16
+  2, 0, 4, _, _,  // 17
+  2, 1, 4, _, _,  // 18
+  X, _, _, _, _,  // 19
+  2, 2, 4, _, _,  // 20
+  3, 0, 2, 4, _,  // 21
+  X, _, _, _, _,  // 22
+  X, _, _, _, _,  // 23
+  X, _, _, _, _,  // 24
+  X, _, _, _, _,  // 25
+  X, _, _, _, _,  // 26
+  X, _, _, _, _,  // 27
+  X, _, _, _, _,  // 28
+  X, _, _, _, _,  // 29
+  X, _, _, _, _,  // 30
+  X, _, _, _, _,  // 31
+  1, 5, _, _, _,  // 32
+  2, 0, 5, _, _,  // 33
+  2, 1, 5, _, _,  // 34
+  X, _, _, _, _,  // 35
+  2, 2, 5, _, _,  // 36
+  3, 0, 2, 5, _,  // 37
+  X, _, _, _, _,  // 38
+  X, _, _, _, _,  // 39
+  2, 3, 5, _, _,  // 40
+  3, 0, 3, 5, _,  // 41
+  3, 1, 3, 5, _,  // 42
+  X, _, _, _, _,  // 43
+  X, _, _, _, _,  // 44
+  X, _, _, _, _,  // 45
+  X, _, _, _, _,  // 46
+  X, _, _, _, _,  // 47
+  X, _, _, _, _,  // 48
+  X, _, _, _, _,  // 49
+  X, _, _, _, _,  // 50
+  X, _, _, _, _,  // 51
+  X, _, _, _, _,  // 52
+  X, _, _, _, _,  // 53
+  X, _, _, _, _,  // 54
+  X, _, _, _, _,  // 55
+  X, _, _, _, _,  // 56
+  X, _, _, _, _,  // 57
+  X, _, _, _, _,  // 58
+  X, _, _, _, _,  // 59
+  X, _, _, _, _,  // 60
+  X, _, _, _, _,  // 61
+  X, _, _, _, _,  // 62
+  X, _, _, _, _,  // 63
+  1, 6, _, _, _,  // 64
+  2, 0, 6, _, _,  // 65
+  2, 1, 6, _, _,  // 66
+  X, _, _, _, _,  // 67
+  2, 2, 6, _, _,  // 68
+  3, 0, 2, 6, _,  // 69
+  X, _, _, _, _,  // 70
+  X, _, _, _, _,  // 71
+  2, 3, 6, _, _,  // 72
+  3, 0, 3, 6, _,  // 73
+  3, 1, 3, 6, _,  // 74
+  X, _, _, _, _,  // 75
+  X, _, _, _, _,  // 76
+  X, _, _, _, _,  // 77
+  X, _, _, _, _,  // 78
+  X, _, _, _, _,  // 79
+  2, 4, 6, _, _,  // 80
+  3, 0, 4, 6, _,  // 81
+  3, 1, 4, 6, _,  // 82
+  X, _, _, _, _,  // 83
+  3, 2, 4, 6, _,  // 84
+  4, 0, 2, 4, 6,  // 85
+  X, _, _, _, _,  // 86
+  X, _, _, _, _,  // 87
+  X, _, _, _, _,  // 88
+  X, _, _, _, _,  // 89
+  X, _, _, _, _,  // 90
+  X, _, _, _, _,  // 91
+  X, _, _, _, _,  // 92
+  X, _, _, _, _,  // 93
+  X, _, _, _, _,  // 94
+  X, _, _, _, _,  // 95
+  X, _, _, _, _,  // 96
+  X, _, _, _, _,  // 97
+  X, _, _, _, _,  // 98
+  X, _, _, _, _,  // 99
+  X, _, _, _, _,  // 100
+  X, _, _, _, _,  // 101
+  X, _, _, _, _,  // 102
+  X, _, _, _, _,  // 103
+  X, _, _, _, _,  // 104
+  X, _, _, _, _,  // 105
+  X, _, _, _, _,  // 106
+  X, _, _, _, _,  // 107
+  X, _, _, _, _,  // 108
+  X, _, _, _, _,  // 109
+  X, _, _, _, _,  // 110
+  X, _, _, _, _,  // 111
+  X, _, _, _, _,  // 112
+  X, _, _, _, _,  // 113
+  X, _, _, _, _,  // 114
+  X, _, _, _, _,  // 115
+  X, _, _, _, _,  // 116
+  X, _, _, _, _,  // 117
+  X, _, _, _, _,  // 118
+  X, _, _, _, _,  // 119
+  X, _, _, _, _,  // 120
+  X, _, _, _, _,  // 121
+  X, _, _, _, _,  // 122
+  X, _, _, _, _,  // 123
+  X, _, _, _, _,  // 124
+  X, _, _, _, _,  // 125
+  X, _, _, _, _,  // 126
+  X, _, _, _, _,  // 127
+  1, 7, _, _, _,  // 128
+  2, 0, 7, _, _,  // 129
+  2, 1, 7, _, _,  // 130
+  X, _, _, _, _,  // 131
+  2, 2, 7, _, _,  // 132
+  3, 0, 2, 7, _,  // 133
+  X, _, _, _, _,  // 134
+  X, _, _, _, _,  // 135
+  2, 3, 7, _, _,  // 136
+  3, 0, 3, 7, _,  // 137
+  3, 1, 3, 7, _,  // 138
+  X, _, _, _, _,  // 139
+  X, _, _, _, _,  // 140
+  X, _, _, _, _,  // 141
+  X, _, _, _, _,  // 142
+  X, _, _, _, _,  // 143
+  2, 4, 7, _, _,  // 144
+  3, 0, 4, 7, _,  // 145
+  3, 1, 4, 7, _,  // 146
+  X, _, _, _, _,  // 147
+  3, 2, 4, 7, _,  // 148
+  4, 0, 2, 4, 7,  // 149
+  X, _, _, _, _,  // 150
+  X, _, _, _, _,  // 151
+  X, _, _, _, _,  // 152
+  X, _, _, _, _,  // 153
+  X, _, _, _, _,  // 154
+  X, _, _, _, _,  // 155
+  X, _, _, _, _,  // 156
+  X, _, _, _, _,  // 157
+  X, _, _, _, _,  // 158
+  X, _, _, _, _,  // 159
+  2, 5, 7, _, _,  // 160
+  3, 0, 5, 7, _,  // 161
+  3, 1, 5, 7, _,  // 162
+  X, _, _, _, _,  // 163
+  3, 2, 5, 7, _,  // 164
+  4, 0, 2, 5, 7,  // 165
+  X, _, _, _, _,  // 166
+  X, _, _, _, _,  // 167
+  3, 3, 5, 7, _,  // 168
+  4, 0, 3, 5, 7,  // 169
+  4, 1, 3, 5, 7   // 170
+};
+#undef _
+#undef X
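+
+// Reading the table: a byte of mark bits is the line index.  For example,
+// byte 0x85 (bits 0, 2 and 7 set) selects line 133, which reads "3, 0, 2, 7":
+// three object starts, at word offsets 0, 2 and 7.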
 
-  // Page preceding current.
-  Page* prev = Page::FromAddress(NULL);
 
-  // First empty page in a sequence.
-  Page* first_empty_page = Page::FromAddress(NULL);
+// Takes a word of mark bits.  Returns the number of objects that start in the
+// range.  Puts the word offsets of the object starts in the supplied array.
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
+  int objects = 0;
+  int offset = 0;
 
-  // Page preceding first empty page.
-  Page* prec_first_empty_page = Page::FromAddress(NULL);
+  // No consecutive 1 bits.
+  ASSERT((mark_bits & 0x180) != 0x180);
+  ASSERT((mark_bits & 0x18000) != 0x18000);
+  ASSERT((mark_bits & 0x1800000) != 0x1800000);
 
-  // If last used page of space ends with a sequence of dead objects
-  // we can adjust allocation top instead of puting this free area into
-  // the free list. Thus during sweeping we keep track of such areas
-  // and defer their deallocation until the sweeping of the next page
-  // is done: if one of the next pages contains live objects we have
-  // to put such area into the free list.
-  Address last_free_start = NULL;
-  int last_free_size = 0;
+  while (mark_bits != 0) {
+    int byte = (mark_bits & 0xff);
+    mark_bits >>= 8;
+    if (byte != 0) {
+      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
+      char* table = kStartTable + byte * kStartTableEntriesPerLine;
+      int objects_in_these_8_words = table[0];
+      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
+      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
+      for (int i = 0; i < objects_in_these_8_words; i++) {
+        starts[objects++] = offset + table[1 + i];
+      }
+    }
+    offset += 8;
+  }
+  return objects;
+}
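+
+// Worked example: mark_bits == 0x141 has bits 0, 6 and 8 set.  The low byte
+// 0x41 selects line 65 ("2, 0, 6"), contributing starts 0 and 6; the next
+// byte 0x01 selects line 1 ("1, 0"), contributing start 8 + 0 = 8.  The
+// function returns 3 with starts == {0, 6, 8}.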
+
+
+static inline Address DigestFreeStart(Address approximate_free_start,
+                                      uint32_t free_start_cell) {
+  ASSERT(free_start_cell != 0);
+
+  // No consecutive 1 bits.
+  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
+
+  int offsets[16];
+  uint32_t cell = free_start_cell;
+  int offset_of_last_live;
+  if ((cell & 0x80000000u) != 0) {
+    // This case would overflow below.
+    offset_of_last_live = 31;
+  } else {
+    // Remove all but one bit, the most significant.  This is an optimization
+    // that may or may not be worthwhile.
+    cell |= cell >> 16;
+    cell |= cell >> 8;
+    cell |= cell >> 4;
+    cell |= cell >> 2;
+    cell |= cell >> 1;
+    cell = (cell + 1) >> 1;
+    int live_objects = MarkWordToObjectStarts(cell, offsets);
+    ASSERT(live_objects == 1);
+    offset_of_last_live = offsets[live_objects - 1];
+  }
+  Address last_live_start =
+      approximate_free_start + offset_of_last_live * kPointerSize;
+  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
+  Address free_start = last_live_start + last_live->Size();
+  return free_start;
+}
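+
+// The shift-or cascade above smears the most significant set bit downwards
+// so that (cell + 1) >> 1 isolates it.  For free_start_cell == 0x14 (bits 2
+// and 4): the shifts smear the cell to 0x1f, and (0x1f + 1) >> 1 == 0x10
+// leaves only bit 4, so the last live object starts 4 words into the block.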
+
+
+static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
+  ASSERT(cell != 0);
+
+  // No consecutive 1 bits.
+  ASSERT((cell & (cell << 1)) == 0);
+
+  int offsets[16];
+  if (cell == 0x80000000u) {  // Avoid overflow below.
+    return block_address + 31 * kPointerSize;
+  }
+  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
+  ASSERT((first_set_bit & cell) == first_set_bit);
+  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
+  ASSERT(live_objects == 1);
+  USE(live_objects);
+  return block_address + offsets[0] * kPointerSize;
+}
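+
+// StartOfLiveObject isolates the least significant set bit instead:
+// cell ^ (cell - 1) keeps every bit up to and including the lowest set bit,
+// and adding 1 and shifting right reduces that to the bit itself.  For
+// cell == 0x14: 0x14 ^ 0x13 == 0x07 and (0x07 + 1) >> 1 == 0x04, i.e. the
+// first live object starts 2 words into the block.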
+
+
+// Sweeps a space conservatively.  After this has been done the larger free
+// spaces have been put on the free list and the smaller ones have been
+// ignored and left untouched.  A free space is always either ignored or put
+// on the free list, never split up into two parts.  This is important
+// because it means that any FreeSpace maps left actually describe a region of
+// memory that can be ignored when scanning.  Dead objects other than free
+// spaces will not contain the free space map.
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptConservatively();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_start())));
+
+  intptr_t freed_bytes = 0;
+
+  // This is the start of the 32-word block that we are currently looking at.
+  Address block_address = p->area_start();
+
+  // Skip over all the dead objects at the start of the page and mark them free.
+  for (;
+       cell_index < last_cell_index;
+       cell_index++, block_address += 32 * kPointerSize) {
+    if (cells[cell_index] != 0) break;
+  }
+  size_t size = block_address - p->area_start();
+  if (cell_index == last_cell_index) {
+    freed_bytes += static_cast<int>(space->Free(p->area_start(),
+                                                static_cast<int>(size)));
+    ASSERT_EQ(0, p->LiveBytes());
+    return freed_bytes;
+  }
+  // Grow the size of the start-of-page free space a little to get up to the
+  // first live object.
+  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
+  // Free the first free space.
+  size = free_end - p->area_start();
+  freed_bytes += space->Free(p->area_start(),
+                             static_cast<int>(size));
+  // The start of the current free area is represented in undigested form by
+  // the address of the last 32-word section that contained a live object and
+  // the marking bitmap for that cell, which describes where the live object
+  // started.  Unless we find a large free space in the bitmap we will not
+  // digest this pair into a real address.  We start the iteration here at the
+  // first word in the marking bitmap that indicates a live object.
+  Address free_start = block_address;
+  uint32_t free_start_cell = cells[cell_index];
+
+  for ( ;
+       cell_index < last_cell_index;
+       cell_index++, block_address += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(block_address))));
+    uint32_t cell = cells[cell_index];
+    if (cell != 0) {
+      // We have a live object.  Check approximately whether more than 32
+      // words have passed since the last live object.
+      if (block_address - free_start > 32 * kPointerSize) {
+        free_start = DigestFreeStart(free_start, free_start_cell);
+        if (block_address - free_start > 32 * kPointerSize) {
+          // Now that we know the exact start of the free space, it still
+          // looks large enough to be worth bothering with, so find the
+          // start of the first live object at the end of the free space.
+          free_end = StartOfLiveObject(block_address, cell);
+          freed_bytes += space->Free(free_start,
+                                     static_cast<int>(free_end - free_start));
+        }
+      }
+      // Update our undigested record of where the current free area started.
+      free_start = block_address;
+      free_start_cell = cell;
+      // Clear marking bits for current cell.
+      cells[cell_index] = 0;
+    }
+  }
+
+  // Handle the free space at the end of the page.
+  if (block_address - free_start > 32 * kPointerSize) {
+    free_start = DigestFreeStart(free_start, free_start_cell);
+    freed_bytes += space->Free(free_start,
+                               static_cast<int>(block_address - free_start));
+  }
+
+  p->ResetLiveBytes();
+  return freed_bytes;
+}
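+
+// The 32 * kPointerSize threshold above means only free regions larger than
+// one mark-bit cell are digested into exact addresses and handed to the free
+// list; smaller holes are left untouched, which is what makes this sweeper
+// conservative.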
+
+
+void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
+                                      sweeper == LAZY_CONSERVATIVE);
+
+  space->ClearStats();
+
+  PageIterator it(space);
+
+  intptr_t freed_bytes = 0;
+  int pages_swept = 0;
+  intptr_t newspace_size = space->heap()->new_space()->Size();
+  bool lazy_sweeping_active = false;
+  bool unused_page_present = false;
+
+  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t space_left =
+      Min(heap()->OldGenPromotionLimit(old_space_size),
+          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
 
   while (it.has_next()) {
     Page* p = it.next();
 
-    bool is_previous_alive = true;
-    Address free_start = NULL;
-    HeapObject* object;
+    // Clear sweeping flags indicating that marking bits are still intact.
+    p->ClearSweptPrecisely();
+    p->ClearSweptConservatively();
 
-    for (Address current = p->ObjectAreaStart();
-         current < p->AllocationTop();
-         current += object->Size()) {
-      object = HeapObject::FromAddress(current);
-      if (object->IsMarked()) {
-        object->ClearMark();
-        heap->mark_compact_collector()->tracer()->decrement_marked_count();
+    if (p->IsEvacuationCandidate()) {
+      ASSERT(evacuation_candidates_.length() > 0);
+      continue;
+    }
 
-        if (!is_previous_alive) {  // Transition from free to live.
-          space->DeallocateBlock(free_start,
-                                 static_cast<int>(current - free_start),
-                                 true);
-          is_previous_alive = true;
+    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+      // Will be processed in EvacuateNewSpaceAndCandidates.
+      continue;
+    }
+
+    // One unused page is kept; any further unused pages are released instead
+    // of being swept.
+    if (p->LiveBytes() == 0) {
+      if (unused_page_present) {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+                 reinterpret_cast<intptr_t>(p));
         }
-      } else {
-        heap->mark_compact_collector()->ReportDeleteIfNeeded(
-            object, heap->isolate());
-        if (is_previous_alive) {  // Transition from live to free.
-          free_start = current;
-          is_previous_alive = false;
-        }
-        LiveObjectList::ProcessNonLive(object);
+        space->ReleasePage(p);
+        continue;
       }
-      // The object is now unmarked for the call to Size() at the top of the
-      // loop.
+      unused_page_present = true;
     }
 
-    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
-        || (!is_previous_alive && free_start == p->ObjectAreaStart());
-
-    if (page_is_empty) {
-      // This page is empty. Check whether we are in the middle of
-      // sequence of empty pages and start one if not.
-      if (!first_empty_page->is_valid()) {
-        first_empty_page = p;
-        prec_first_empty_page = prev;
-      }
-
-      if (!is_previous_alive) {
-        // There are dead objects on this page. Update space accounting stats
-        // without putting anything into free list.
-        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
-        if (size_in_bytes > 0) {
-          space->DeallocateBlock(free_start, size_in_bytes, false);
-        }
-      }
-    } else {
-      // This page is not empty. Sequence of empty pages ended on the previous
-      // one.
-      if (first_empty_page->is_valid()) {
-        space->FreePages(prec_first_empty_page, prev);
-        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
-      }
-
-      // If there is a free ending area on one of the previous pages we have
-      // deallocate that area and put it on the free list.
-      if (last_free_size > 0) {
-        Page::FromAddress(last_free_start)->
-            SetAllocationWatermark(last_free_start);
-        space->DeallocateBlock(last_free_start, last_free_size, true);
-        last_free_start = NULL;
-        last_free_size  = 0;
-      }
-
-      // If the last region of this page was not live we remember it.
-      if (!is_previous_alive) {
-        ASSERT(last_free_size == 0);
-        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
-        last_free_start = free_start;
-      }
-    }
-
-    prev = p;
-  }
-
-  // We reached end of space. See if we need to adjust allocation top.
-  Address new_allocation_top = NULL;
-
-  if (first_empty_page->is_valid()) {
-    // Last used pages in space are empty. We can move allocation top backwards
-    // to the beginning of first empty page.
-    ASSERT(prev == space->AllocationTopPage());
-
-    new_allocation_top = first_empty_page->ObjectAreaStart();
-  }
-
-  if (last_free_size > 0) {
-    // There was a free ending area on the previous page.
-    // Deallocate it without putting it into freelist and move allocation
-    // top to the beginning of this free area.
-    space->DeallocateBlock(last_free_start, last_free_size, false);
-    new_allocation_top = last_free_start;
-  }
-
-  if (new_allocation_top != NULL) {
-#ifdef DEBUG
-    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
-    if (!first_empty_page->is_valid()) {
-      ASSERT(new_allocation_top_page == space->AllocationTopPage());
-    } else if (last_free_size > 0) {
-      ASSERT(new_allocation_top_page == prec_first_empty_page);
-    } else {
-      ASSERT(new_allocation_top_page == first_empty_page);
-    }
-#endif
-
-    space->SetTop(new_allocation_top);
-  }
-}
-
-
-void MarkCompactCollector::EncodeForwardingAddresses() {
-  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
-  // Objects in the active semispace of the young generation may be
-  // relocated to the inactive semispace (if not promoted).  Set the
-  // relocation info to the beginning of the inactive semispace.
-  heap()->new_space()->MCResetRelocationInfo();
-
-  // Compute the forwarding pointers in each space.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
-                                        ReportDeleteIfNeeded>(
-      heap()->old_pointer_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->old_data_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
-                                        ReportDeleteIfNeeded>(
-      heap()->code_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->cell_space());
-
-
-  // Compute new space next to last after the old and code spaces have been
-  // compacted.  Objects in new space can be promoted to old or code space.
-  EncodeForwardingAddressesInNewSpace();
-
-  // Compute map space last because computing forwarding addresses
-  // overwrites non-live objects.  Objects in the other spaces rely on
-  // non-live map pointers to get the sizes of non-live objects.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->map_space());
-
-  // Write relocation info to the top page, so we can use it later.  This is
-  // done after promoting objects from the new space so we get the correct
-  // allocation top.
-  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
-  heap()->old_data_space()->MCWriteRelocationInfoToPage();
-  heap()->code_space()->MCWriteRelocationInfoToPage();
-  heap()->map_space()->MCWriteRelocationInfoToPage();
-  heap()->cell_space()->MCWriteRelocationInfoToPage();
-}
-
-
-class MapIterator : public HeapObjectIterator {
- public:
-  explicit MapIterator(Heap* heap)
-    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
-
-  MapIterator(Heap* heap, Address start)
-      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
-
- private:
-  static int SizeCallback(HeapObject* unused) {
-    USE(unused);
-    return Map::kSize;
-  }
-};
-
-
-class MapCompact {
- public:
-  explicit MapCompact(Heap* heap, int live_maps)
-    : heap_(heap),
-      live_maps_(live_maps),
-      to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
-      vacant_map_it_(heap),
-      map_to_evacuate_it_(heap, to_evacuate_start_),
-      first_map_to_evacuate_(
-          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
-  }
-
-  void CompactMaps() {
-    // As we know the number of maps to evacuate beforehand,
-    // we stop then there is no more vacant maps.
-    for (Map* next_vacant_map = NextVacantMap();
-         next_vacant_map;
-         next_vacant_map = NextVacantMap()) {
-      EvacuateMap(next_vacant_map, NextMapToEvacuate());
-    }
-
-#ifdef DEBUG
-    CheckNoMapsToEvacuate();
-#endif
-  }
-
-  void UpdateMapPointersInRoots() {
-    MapUpdatingVisitor map_updating_visitor;
-    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
-    heap()->isolate()->global_handles()->IterateWeakRoots(
-        &map_updating_visitor);
-    LiveObjectList::IterateElements(&map_updating_visitor);
-  }
-
-  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
-    ASSERT(space != heap()->map_space());
-
-    PageIterator it(space, PageIterator::PAGES_IN_USE);
-    while (it.has_next()) {
-      Page* p = it.next();
-      UpdateMapPointersInRange(heap(),
-                               p->ObjectAreaStart(),
-                               p->AllocationTop());
-    }
-  }
-
-  void UpdateMapPointersInNewSpace() {
-    NewSpace* space = heap()->new_space();
-    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
-  }
-
-  void UpdateMapPointersInLargeObjectSpace() {
-    LargeObjectIterator it(heap()->lo_space());
-    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-      UpdateMapPointersInObject(heap(), obj);
-  }
-
-  void Finish() {
-    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
-  }
-
-  inline Heap* heap() const { return heap_; }
-
- private:
-  Heap* heap_;
-  int live_maps_;
-  Address to_evacuate_start_;
-  MapIterator vacant_map_it_;
-  MapIterator map_to_evacuate_it_;
-  Map* first_map_to_evacuate_;
-
-  // Helper class for updating map pointers in HeapObjects.
-  class MapUpdatingVisitor: public ObjectVisitor {
-  public:
-    MapUpdatingVisitor() {}
-
-    void VisitPointer(Object** p) {
-      UpdateMapPointer(p);
-    }
-
-    void VisitPointers(Object** start, Object** end) {
-      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
-    }
-
-  private:
-    void UpdateMapPointer(Object** p) {
-      if (!(*p)->IsHeapObject()) return;
-      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
-
-      // Moved maps are tagged with overflowed map word.  They are the only
-      // objects those map word is overflowed as marking is already complete.
-      MapWord map_word = old_map->map_word();
-      if (!map_word.IsOverflowed()) return;
-
-      *p = GetForwardedMap(map_word);
-    }
-  };
-
-  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
-    while (true) {
-      HeapObject* next = it->next();
-      ASSERT(next != NULL);
-      if (next == last)
-        return NULL;
-      ASSERT(!next->IsOverflowed());
-      ASSERT(!next->IsMarked());
-      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
-      if (next->IsMap() == live)
-        return reinterpret_cast<Map*>(next);
-    }
-  }
-
-  Map* NextVacantMap() {
-    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
-    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
-    return map;
-  }
-
-  Map* NextMapToEvacuate() {
-    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
-    ASSERT(map != NULL);
-    ASSERT(map->IsMap());
-    return map;
-  }
-
-  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
-    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
-    ASSERT(map_to_evacuate->IsMap());
-
-    ASSERT(Map::kSize % 4 == 0);
-
-    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
-        vacant_map->address(), map_to_evacuate->address(), Map::kSize);
-
-    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
-
-    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
-    forwarding_map_word.SetOverflow();
-    map_to_evacuate->set_map_word(forwarding_map_word);
-
-    ASSERT(map_to_evacuate->map_word().IsOverflowed());
-    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
-  }
-
-  static Map* GetForwardedMap(MapWord map_word) {
-    ASSERT(map_word.IsOverflowed());
-    map_word.ClearOverflow();
-    Map* new_map = map_word.ToMap();
-    ASSERT_MAP_ALIGNED(new_map->address());
-    return new_map;
-  }
-
-  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
-    ASSERT(!obj->IsMarked());
-    Map* map = obj->map();
-    ASSERT(heap->map_space()->Contains(map));
-    MapWord map_word = map->map_word();
-    ASSERT(!map_word.IsMarked());
-    if (map_word.IsOverflowed()) {
-      Map* new_map = GetForwardedMap(map_word);
-      ASSERT(heap->map_space()->Contains(new_map));
-      obj->set_map(new_map);
-
-#ifdef DEBUG
+    if (lazy_sweeping_active) {
       if (FLAG_gc_verbose) {
-        PrintF("update %p : %p -> %p\n",
-               obj->address(),
-               reinterpret_cast<void*>(map),
-               reinterpret_cast<void*>(new_map));
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
+               reinterpret_cast<intptr_t>(p));
       }
-#endif
+      continue;
     }
 
-    int size = obj->SizeFromMap(map);
-    MapUpdatingVisitor map_updating_visitor;
-    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
-    return size;
-  }
-
-  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
-    HeapObject* object;
-    int size;
-    for (Address current = start; current < end; current += size) {
-      object = HeapObject::FromAddress(current);
-      size = UpdateMapPointersInObject(heap, object);
-      ASSERT(size > 0);
+    switch (sweeper) {
+      case CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        SweepConservatively(space, p);
+        pages_swept++;
+        break;
+      }
+      case LAZY_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        freed_bytes += SweepConservatively(space, p);
+        pages_swept++;
+        if (space_left + freed_bytes > newspace_size) {
+          space->SetPagesToSweep(p->next_page());
+          lazy_sweeping_active = true;
+        } else {
+          if (FLAG_gc_verbose) {
+            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
+                   freed_bytes);
+          }
+        }
+        break;
+      }
+      case PRECISE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        if (space->identity() == CODE_SPACE) {
+          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+        } else {
+          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+        }
+        pages_swept++;
+        break;
+      }
+      default: {
+        UNREACHABLE();
+      }
     }
   }
 
-#ifdef DEBUG
-  void CheckNoMapsToEvacuate() {
-    if (!FLAG_enable_slow_asserts)
-      return;
-
-    for (HeapObject* obj = map_to_evacuate_it_.next();
-         obj != NULL; obj = map_to_evacuate_it_.next())
-      ASSERT(FreeListNode::IsFreeListNode(obj));
+  if (FLAG_gc_verbose) {
+    PrintF("SweepSpace: %s (%d pages swept)\n",
+           AllocationSpaceName(space->identity()),
+           pages_swept);
   }
-#endif
-};
+
+  // Give pages that are queued to be freed back to the OS.
+  heap()->FreeQueuedChunks();
+}
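+
+// The LAZY_CONSERVATIVE cut-off in SweepSpace stops eager sweeping as soon
+// as space_left + freed_bytes exceeds the new-space size, i.e. once the old
+// generation can absorb a full scavenge; the remaining pages are queued via
+// SetPagesToSweep and swept on demand.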
 
 
 void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-
-  ASSERT(state_ == SWEEP_SPACES);
-  ASSERT(!IsCompacting());
+#ifdef DEBUG
+  state_ = SWEEP_SPACES;
+#endif
+  SweeperType how_to_sweep =
+      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  if (sweep_precisely_) how_to_sweep = PRECISE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(heap(), heap()->old_pointer_space());
-  SweepSpace(heap(), heap()->old_data_space());
-  SweepSpace(heap(), heap()->code_space());
-  SweepSpace(heap(), heap()->cell_space());
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    SweepNewSpace(heap(), heap()->new_space());
-  }
-  SweepSpace(heap(), heap()->map_space());
+  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+  SweepSpace(heap()->old_data_space(), how_to_sweep);
 
-  heap()->IterateDirtyRegions(heap()->map_space(),
-                             &heap()->IteratePointersInDirtyMapsRegion,
-                             &UpdatePointerToNewGen,
-                             heap()->WATERMARK_SHOULD_BE_VALID);
+  RemoveDeadInvalidatedCode();
+  SweepSpace(heap()->code_space(), PRECISE);
 
-  intptr_t live_maps_size = heap()->map_space()->Size();
-  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
-  ASSERT(live_map_objects_size_ == live_maps_size);
+  SweepSpace(heap()->cell_space(), PRECISE);
 
-  if (heap()->map_space()->NeedsCompaction(live_maps)) {
-    MapCompact map_compact(heap(), live_maps);
+  EvacuateNewSpaceAndCandidates();
 
-    map_compact.CompactMaps();
-    map_compact.UpdateMapPointersInRoots();
+  // ClearNonLiveTransitions depends on precise sweeping of map space to
+  // detect whether an unmarked map became dead in this collection or in one
+  // of the previous ones.
+  SweepSpace(heap()->map_space(), PRECISE);
 
-    PagedSpaces spaces;
-    for (PagedSpace* space = spaces.next();
-         space != NULL; space = spaces.next()) {
-      if (space == heap()->map_space()) continue;
-      map_compact.UpdateMapPointersInPagedSpace(space);
-    }
-    map_compact.UpdateMapPointersInNewSpace();
-    map_compact.UpdateMapPointersInLargeObjectSpace();
-
-    map_compact.Finish();
-  }
-}
-
-
-// Iterate the live objects in a range of addresses (eg, a page or a
-// semispace).  The live regions of the range have been linked into a list.
-// The first live region is [first_live_start, first_live_end), and the last
-// address in the range is top.  The callback function is used to get the
-// size of each live object.
-int MarkCompactCollector::IterateLiveObjectsInRange(
-    Address start,
-    Address end,
-    LiveObjectCallback size_func) {
-  int live_objects_size = 0;
-  Address current = start;
-  while (current < end) {
-    uint32_t encoded_map = Memory::uint32_at(current);
-    if (encoded_map == kSingleFreeEncoding) {
-      current += kPointerSize;
-    } else if (encoded_map == kMultiFreeEncoding) {
-      current += Memory::int_at(current + kIntSize);
-    } else {
-      int size = (this->*size_func)(HeapObject::FromAddress(current));
-      current += size;
-      live_objects_size += size;
-    }
-  }
-  return live_objects_size;
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
-    NewSpace* space, LiveObjectCallback size_f) {
-  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
-  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
-    PagedSpace* space, LiveObjectCallback size_f) {
-  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
-  int total = 0;
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
-                                       p->AllocationTop(),
-                                       size_f);
-  }
-  return total;
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 3: Update pointers
-
-// Helper class for updating pointers in HeapObjects.
-class UpdatingVisitor: public ObjectVisitor {
- public:
-  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) {
-    UpdatePointer(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    // Mark all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    VisitPointer(&target);
-    rinfo->set_target_address(
-        reinterpret_cast<Code*>(target)->instruction_start());
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    VisitPointer(&target);
-    rinfo->set_call_address(
-        reinterpret_cast<Code*>(target)->instruction_start());
-  }
-
-  inline Heap* heap() const { return heap_; }
-
- private:
-  void UpdatePointer(Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    HeapObject* obj = HeapObject::cast(*p);
-    Address old_addr = obj->address();
-    Address new_addr;
-    ASSERT(!heap()->InFromSpace(obj));
-
-    if (heap()->new_space()->Contains(obj)) {
-      Address forwarding_pointer_addr =
-          heap()->new_space()->FromSpaceLow() +
-          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-      new_addr = Memory::Address_at(forwarding_pointer_addr);
-
-#ifdef DEBUG
-      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
-             heap()->old_data_space()->Contains(new_addr) ||
-             heap()->new_space()->FromSpaceContains(new_addr) ||
-             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
-      if (heap()->new_space()->FromSpaceContains(new_addr)) {
-        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
-      }
-#endif
-
-    } else if (heap()->lo_space()->Contains(obj)) {
-      // Don't move objects in the large object space.
-      return;
-
-    } else {
-#ifdef DEBUG
-      PagedSpaces spaces;
-      PagedSpace* original_space = spaces.next();
-      while (original_space != NULL) {
-        if (original_space->Contains(obj)) break;
-        original_space = spaces.next();
-      }
-      ASSERT(original_space != NULL);
-#endif
-      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
-      ASSERT(original_space->Contains(new_addr));
-      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
-             original_space->MCSpaceOffsetForAddress(old_addr));
-    }
-
-    *p = HeapObject::FromAddress(new_addr);
-
-#ifdef DEBUG
-    if (FLAG_gc_verbose) {
-      PrintF("update %p : %p -> %p\n",
-             reinterpret_cast<Address>(p), old_addr, new_addr);
-    }
-#endif
-  }
-
-  Heap* heap_;
-};
-
-
-void MarkCompactCollector::UpdatePointers() {
-#ifdef DEBUG
-  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
-  state_ = UPDATE_POINTERS;
-#endif
-  UpdatingVisitor updating_visitor(heap());
-  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
-      &updating_visitor);
-  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
-  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
-
-  // Update the pointer to the head of the weak list of global contexts.
-  updating_visitor.VisitPointer(&heap()->global_contexts_list_);
-
-  LiveObjectList::IterateElements(&updating_visitor);
-
-  int live_maps_size = IterateLiveObjects(
-      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_pointer_olds_size = IterateLiveObjects(
-      heap()->old_pointer_space(),
-      &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_data_olds_size = IterateLiveObjects(
-      heap()->old_data_space(),
-      &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_codes_size = IterateLiveObjects(
-      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_cells_size = IterateLiveObjects(
-      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_news_size = IterateLiveObjects(
-      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
-
-  // Large objects do not move, the map word can be updated directly.
-  LargeObjectIterator it(heap()->lo_space());
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
-    UpdatePointersInNewObject(obj);
-  }
-
-  USE(live_maps_size);
-  USE(live_pointer_olds_size);
-  USE(live_data_olds_size);
-  USE(live_codes_size);
-  USE(live_cells_size);
-  USE(live_news_size);
-  ASSERT(live_maps_size == live_map_objects_size_);
-  ASSERT(live_data_olds_size == live_old_data_objects_size_);
-  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
-  ASSERT(live_codes_size == live_code_objects_size_);
-  ASSERT(live_cells_size == live_cell_objects_size_);
-  ASSERT(live_news_size == live_young_objects_size_);
-}
-
-
-int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
-  // Keep old map pointers
-  Map* old_map = obj->map();
-  ASSERT(old_map->IsHeapObject());
-
-  Address forwarded = GetForwardingAddressInOldSpace(old_map);
-
-  ASSERT(heap()->map_space()->Contains(old_map));
-  ASSERT(heap()->map_space()->Contains(forwarded));
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
-           forwarded);
-  }
-#endif
-  // Update the map pointer.
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
-
-  // We have to compute the object size relying on the old map because
-  // map objects are not relocated yet.
-  int obj_size = obj->SizeFromMap(old_map);
-
-  // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap());
-  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
-  return obj_size;
-}
-
-
-int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
-  // Decode the map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // At this point, the first word of map_addr is also encoded, cannot
-  // cast it to Map* using Map::cast.
-  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
-  int obj_size = obj->SizeFromMap(map);
-  InstanceType type = map->instance_type();
-
-  // Update map pointer.
-  Address new_map_addr = GetForwardingAddressInOldSpace(map);
-  int offset = encoding.DecodeOffset();
-  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("update %p : %p -> %p\n", obj->address(),
-           map_addr, new_map_addr);
-  }
-#endif
-
-  // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap());
-  obj->IterateBody(type, obj_size, &updating_visitor);
-  return obj_size;
-}
-
-
-Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
-  // Object should either in old or map space.
-  MapWord encoding = obj->map_word();
-
-  // Offset to the first live object's forwarding address.
-  int offset = encoding.DecodeOffset();
-  Address obj_addr = obj->address();
-
-  // Find the first live object's forwarding address.
-  Page* p = Page::FromAddress(obj_addr);
-  Address first_forwarded = p->mc_first_forwarded;
-
-  // Page start address of forwarded address.
-  Page* forwarded_page = Page::FromAddress(first_forwarded);
-  int forwarded_offset = forwarded_page->Offset(first_forwarded);
-
-  // Find end of allocation in the page of first_forwarded.
-  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
-
-  // Check if current object's forward pointer is in the same page
-  // as the first live object's forwarding pointer
-  if (forwarded_offset + offset < mc_top_offset) {
-    // In the same page.
-    return first_forwarded + offset;
-  }
-
-  // Must be in the next page, NOTE: this may cross chunks.
-  Page* next_page = forwarded_page->next_page();
-  ASSERT(next_page->is_valid());
-
-  offset -= (mc_top_offset - forwarded_offset);
-  offset += Page::kObjectStartOffset;
-
-  ASSERT_PAGE_OFFSET(offset);
-  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
-
-  return next_page->OffsetToAddress(offset);
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 4: Relocate objects
-
-void MarkCompactCollector::RelocateObjects() {
-#ifdef DEBUG
-  ASSERT(state_ == UPDATE_POINTERS);
-  state_ = RELOCATE_OBJECTS;
-#endif
-  // Relocates objects, always relocate map objects first. Relocating
-  // objects in other space relies on map objects to get object size.
-  int live_maps_size = IterateLiveObjects(
-      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
-  int live_pointer_olds_size = IterateLiveObjects(
-      heap()->old_pointer_space(),
-      &MarkCompactCollector::RelocateOldPointerObject);
-  int live_data_olds_size = IterateLiveObjects(
-      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
-  int live_codes_size = IterateLiveObjects(
-      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
-  int live_cells_size = IterateLiveObjects(
-      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
-  int live_news_size = IterateLiveObjects(
-      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
-
-  USE(live_maps_size);
-  USE(live_pointer_olds_size);
-  USE(live_data_olds_size);
-  USE(live_codes_size);
-  USE(live_cells_size);
-  USE(live_news_size);
-  ASSERT(live_maps_size == live_map_objects_size_);
-  ASSERT(live_data_olds_size == live_old_data_objects_size_);
-  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
-  ASSERT(live_codes_size == live_code_objects_size_);
-  ASSERT(live_cells_size == live_cell_objects_size_);
-  ASSERT(live_news_size == live_young_objects_size_);
-
-  // Flip from and to spaces
-  heap()->new_space()->Flip();
-
-  heap()->new_space()->MCCommitRelocationInfo();
-
-  // Set age_mark to bottom in to space
-  Address mark = heap()->new_space()->bottom();
-  heap()->new_space()->set_age_mark(mark);
-
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->MCCommitRelocationInfo();
-
-  heap()->CheckNewSpaceExpansionCriteria();
-  heap()->IncrementYoungSurvivorsCounter(live_news_size);
-}
-
-
-int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // Get forwarding address before resetting map pointer
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset map pointer.  The meta map object may not be copied yet so
-  // Map::cast does not yet work.
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                   old_addr,
-                                                   Map::kSize);
-  }
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", old_addr, new_addr);
-  }
-#endif
-
-  return Map::kSize;
-}
-
-
-static inline int RestoreMap(HeapObject* obj,
-                             PagedSpace* space,
-                             Address new_addr,
-                             Address map_addr) {
-  // This must be a non-map object, and the function relies on the
-  // assumption that the Map space is compacted before the other paged
-  // spaces (see RelocateObjects).
-
-  // Reset map pointer.
-  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
-
-  int obj_size = obj->Size();
-  ASSERT_OBJECT_SIZE(obj_size);
-
-  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
-         space->MCSpaceOffsetForAddress(obj->address()));
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
-  }
-#endif
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
-                                                   PagedSpace* space) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(map_addr));
-
-  // Get forwarding address before resetting map pointer.
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset the map pointer.
-  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    if (space == heap()->old_data_space()) {
-      heap()->MoveBlock(new_addr, old_addr, obj_size);
-    } else {
-      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                     old_addr,
-                                                     obj_size);
-    }
-  }
-
-  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap()->isolate(),
-            SharedFunctionInfoMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
-}
-
-
-int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
-}
-
-
-int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->cell_space());
-}
-
-
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // Get forwarding address before resetting map pointer
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset the map pointer.
-  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    heap()->MoveBlock(new_addr, old_addr, obj_size);
-  }
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsCode()) {
-    // May also update inline cache target.
-    Code::cast(copied_to)->Relocate(new_addr - old_addr);
-    // Notify the logger that compiled code has moved.
-    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
-  int obj_size = obj->Size();
-
-  // Get forwarding address
-  Address old_addr = obj->address();
-  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-
-  Address new_addr =
-    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
-
-#ifdef DEBUG
-  if (heap()->new_space()->FromSpaceContains(new_addr)) {
-    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
-  } else {
-    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
-           heap()->TargetSpace(obj) == heap()->old_data_space());
-  }
-#endif
-
-  // New and old addresses cannot overlap.
-  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
-    heap()->CopyBlock(new_addr, old_addr, obj_size);
-  } else {
-    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                   old_addr,
-                                                   obj_size);
-  }
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", old_addr, new_addr);
-  }
-#endif
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap()->isolate(),
-            SharedFunctionInfoMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
+  // Deallocate unmarked objects and clear mark bits for marked objects.
+  heap_->lo_space()->FreeUnmarkedObjects();
 }
 
 
@@ -3359,6 +3758,9 @@
 }
 
 
+// TODO(1466) ReportDeleteIfNeeded is not called currently.
+// Our profiling tools do not expect intersections between
+// code objects. We should either reenable it or change our tools.
 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                 Isolate* isolate) {
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -3372,16 +3774,148 @@
 }
 
 
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
+void MarkCompactCollector::Initialize() {
+  StaticMarkingVisitor::Initialize();
 }
 
 
-void MarkCompactCollector::Initialize() {
-  StaticPointersToNewGenUpdatingVisitor::Initialize();
-  StaticMarkingVisitor::Initialize();
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+                        SlotsBuffer** buffer_address,
+                        SlotType type,
+                        Address addr,
+                        AdditionMode mode) {
+  SlotsBuffer* buffer = *buffer_address;
+  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+      allocator->DeallocateChain(buffer_address);
+      return false;
+    }
+    buffer = allocator->AllocateBuffer(buffer);
+    *buffer_address = buffer;
+  }
+  ASSERT(buffer->HasSpaceForTypedSlot());
+  buffer->Add(reinterpret_cast<ObjectSlot>(type));
+  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+  return true;
+}
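+
+// Illustrative note (not part of the original patch): a typed slot occupies
+// two consecutive buffer elements, e.g. recording a code target at a
+// hypothetical pc 0x10008 appends the pair (CODE_TARGET_SLOT, 0x10008),
+// which is why HasSpaceForTypedSlot() must see two free elements before the
+// two Add() calls above.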
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+  if (RelocInfo::IsCodeTarget(rmode)) {
+    return SlotsBuffer::CODE_TARGET_SLOT;
+  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+    return SlotsBuffer::DEBUG_TARGET_SLOT;
+  } else if (RelocInfo::IsJSReturn(rmode)) {
+    return SlotsBuffer::JS_RETURN_SLOT;
+  }
+  UNREACHABLE();
+  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      (rinfo->host() == NULL ||
+       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotTypeForRMode(rinfo->rmode()),
+                            rinfo->pc(),
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
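+
+// Illustrative arithmetic (derived from the constants in mark-compact.h):
+// with kChainLengthThreshold == 6 and 1021 elements per buffer, a page whose
+// slots buffer chain already holds roughly 6 * 1021 entries fails AddTo() in
+// FAIL_ON_OVERFLOW mode and is evicted as an evacuation candidate instead of
+// growing the chain further.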
+
+
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotsBuffer::CODE_ENTRY_SLOT,
+                            slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+    SlotsBuffer::ObjectSlot slot) {
+  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void SlotsBuffer::UpdateSlots(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      PointersUpdatingVisitor::UpdateSlot(heap, slot);
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      UpdateSlot(&v,
+                 DecodeSlotType(slot),
+                 reinterpret_cast<Address>(slots_[slot_idx]));
+    }
+  }
+}
+
+
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        PointersUpdatingVisitor::UpdateSlot(heap, slot);
+      }
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(&v,
+                   DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+  return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+  delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+  SlotsBuffer* buffer = *buffer_address;
+  while (buffer != NULL) {
+    SlotsBuffer* next_buffer = buffer->next();
+    DeallocateBuffer(buffer);
+    buffer = next_buffer;
+  }
+  *buffer_address = NULL;
 }
 
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index f72c813..e0a7d94 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_MARK_COMPACT_H_
 #define V8_MARK_COMPACT_H_
 
+#include "compiler-intrinsics.h"
 #include "spaces.h"
 
 namespace v8 {
@@ -45,54 +46,340 @@
 class RootMarkingVisitor;
 
 
-// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
-
-class MarkingStack {
+class Marking {
  public:
-  MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
+  explicit Marking(Heap* heap)
+      : heap_(heap) {
+  }
+
+  static inline MarkBit MarkBitFrom(Address addr);
+
+  static inline MarkBit MarkBitFrom(HeapObject* obj) {
+    return MarkBitFrom(reinterpret_cast<Address>(obj));
+  }
+
+  // Impossible markbits: 01
+  static const char* kImpossibleBitPattern;
+  static inline bool IsImpossible(MarkBit mark_bit) {
+    return !mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  // Black markbits: 10 - this is required by the sweeper.
+  static const char* kBlackBitPattern;
+  static inline bool IsBlack(MarkBit mark_bit) {
+    return mark_bit.Get() && !mark_bit.Next().Get();
+  }
+
+  // White markbits: 00 - this is required by the mark bit clearer.
+  static const char* kWhiteBitPattern;
+  static inline bool IsWhite(MarkBit mark_bit) {
+    return !mark_bit.Get();
+  }
+
+  // Grey markbits: 11
+  static const char* kGreyBitPattern;
+  static inline bool IsGrey(MarkBit mark_bit) {
+    return mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  static inline void MarkBlack(MarkBit mark_bit) {
+    mark_bit.Set();
+    mark_bit.Next().Clear();
+  }
+
+  static inline void BlackToGrey(MarkBit markbit) {
+    markbit.Next().Set();
+  }
+
+  static inline void WhiteToGrey(MarkBit markbit) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  static inline void GreyToBlack(MarkBit markbit) {
+    markbit.Next().Clear();
+  }
+
+  static inline void BlackToGrey(HeapObject* obj) {
+    BlackToGrey(MarkBitFrom(obj));
+  }
+
+  static inline void AnyToGrey(MarkBit markbit) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  // Returns true if the object whose mark is transferred is marked black.
+  bool TransferMark(Address old_start, Address new_start);
+
+#ifdef DEBUG
+  enum ObjectColor {
+    BLACK_OBJECT,
+    WHITE_OBJECT,
+    GREY_OBJECT,
+    IMPOSSIBLE_COLOR
+  };
+
+  static const char* ColorName(ObjectColor color) {
+    switch (color) {
+      case BLACK_OBJECT: return "black";
+      case WHITE_OBJECT: return "white";
+      case GREY_OBJECT: return "grey";
+      case IMPOSSIBLE_COLOR: return "impossible";
+    }
+    return "error";
+  }
+
+  static ObjectColor Color(HeapObject* obj) {
+    return Color(Marking::MarkBitFrom(obj));
+  }
+
+  static ObjectColor Color(MarkBit mark_bit) {
+    if (IsBlack(mark_bit)) return BLACK_OBJECT;
+    if (IsWhite(mark_bit)) return WHITE_OBJECT;
+    if (IsGrey(mark_bit)) return GREY_OBJECT;
+    UNREACHABLE();
+    return IMPOSSIBLE_COLOR;
+  }
+#endif
+
+  // Returns true if the transferred color is black.
+  INLINE(static bool TransferColor(HeapObject* from,
+                                   HeapObject* to)) {
+    MarkBit from_mark_bit = MarkBitFrom(from);
+    MarkBit to_mark_bit = MarkBitFrom(to);
+    bool is_black = false;
+    if (from_mark_bit.Get()) {
+      to_mark_bit.Set();
+      is_black = true;  // Looks black so far.
+    }
+    if (from_mark_bit.Next().Get()) {
+      to_mark_bit.Next().Set();
+      is_black = false;  // Was actually grey.
+    }
+    return is_black;
+  }
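+  // Illustrative walk-through: a grey source (11) copies both bits, so the
+  // second branch resets is_black to false; only a genuinely black source
+  // (10) makes TransferColor return true.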
+
+ private:
+  Heap* heap_;
+};
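+
+// Illustrative summary of the encoding above: a live object's two mark bits
+// move white (00) -> grey (11) -> black (10); WhiteToGrey sets both bits and
+// GreyToBlack clears the second, so the pattern 01 can never arise and
+// IsImpossible() flags it.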
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+
+class MarkingDeque {
+ public:
+  MarkingDeque()
+      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
 
   void Initialize(Address low, Address high) {
-    top_ = low_ = reinterpret_cast<HeapObject**>(low);
-    high_ = reinterpret_cast<HeapObject**>(high);
+    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+    array_ = obj_low;
+    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
+    top_ = bottom_ = 0;
     overflowed_ = false;
   }
 
-  bool is_full() const { return top_ >= high_; }
+  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
 
-  bool is_empty() const { return top_ <= low_; }
+  inline bool IsEmpty() { return top_ == bottom_; }
 
   bool overflowed() const { return overflowed_; }
 
-  void clear_overflowed() { overflowed_ = false; }
+  void ClearOverflowed() { overflowed_ = false; }
+
+  void SetOverflowed() { overflowed_ = true; }
 
   // Push the (marked) object on the marking stack if there is room,
   // otherwise mark the object as overflowed and wait for a rescan of the
   // heap.
-  void Push(HeapObject* object) {
-    CHECK(object->IsHeapObject());
-    if (is_full()) {
-      object->SetOverflow();
-      overflowed_ = true;
+  inline void PushBlack(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      Marking::BlackToGrey(object);
+      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+      SetOverflowed();
     } else {
-      *(top_++) = object;
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
     }
   }
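+  // Note (illustrative, based on the code above): demoting an overflowing
+  // black object back to grey also retracts its live-byte contribution;
+  // the bytes are counted again once the object is re-marked black during
+  // the overflow rescan.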
 
-  HeapObject* Pop() {
-    ASSERT(!is_empty());
-    HeapObject* object = *(--top_);
-    CHECK(object->IsHeapObject());
+  inline void PushGrey(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  inline HeapObject* Pop() {
+    ASSERT(!IsEmpty());
+    top_ = ((top_ - 1) & mask_);
+    HeapObject* object = array_[top_];
+    ASSERT(object->IsHeapObject());
     return object;
   }
 
+  inline void UnshiftGrey(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      bottom_ = ((bottom_ - 1) & mask_);
+      array_[bottom_] = object;
+    }
+  }
+
+  HeapObject** array() { return array_; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }
+
  private:
-  HeapObject** low_;
-  HeapObject** top_;
-  HeapObject** high_;
+  HeapObject** array_;
+  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque
+  // is empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
+  // (mod mask_ + 1).
+  int top_;
+  int bottom_;
+  int mask_;
   bool overflowed_;
 
-  DISALLOW_COPY_AND_ASSIGN(MarkingStack);
+  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
+};
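+
+// Worked example (hypothetical 8-element backing array, so mask_ == 7):
+// after seven pushes top_ == 7 while bottom_ == 0, and IsFull() holds since
+// ((7 + 1) & 7) == 0 == bottom_; the deque therefore stores at most mask_
+// elements, trading one slot for branch-free wrap-around arithmetic.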
+
+
+class SlotsBufferAllocator {
+ public:
+  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+  void DeallocateBuffer(SlotsBuffer* buffer);
+
+  void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that have to be updated
+// after live objects have been relocated from evacuation candidates.
+// All slots are either untyped or typed:
+//    - Untyped slots are expected to contain a tagged object pointer.
+//      They are recorded by their address.
+//    - Typed slots are expected to contain an encoded pointer to a heap
+//      object, where the encoding depends on the type of the slot.
+//      They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped, which allows us to
+// distinguish untyped slots from typed slots during iteration by a simple
+// comparison: if an element of the slots buffer is less than
+// NUMBER_OF_SLOT_TYPES, it is the first element of a typed slot's pair.
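+// (Illustrative: an element equal to CODE_TARGET_SLOT is a type tag whose
+// address follows in the next element, while an element holding an ordinary
+// slot address such as a hypothetical 0x10008 compares above it; only the
+// zero-page assumption makes the comparison unambiguous.)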
+class SlotsBuffer {
+ public:
+  typedef Object** ObjectSlot;
+
+  explicit SlotsBuffer(SlotsBuffer* next_buffer)
+      : idx_(0), chain_length_(1), next_(next_buffer) {
+    if (next_ != NULL) {
+      chain_length_ = next_->chain_length_ + 1;
+    }
+  }
+
+  ~SlotsBuffer() {
+  }
+
+  void Add(ObjectSlot slot) {
+    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
+    slots_[idx_++] = slot;
+  }
+
+  enum SlotType {
+    EMBEDDED_OBJECT_SLOT,
+    RELOCATED_CODE_OBJECT,
+    CODE_TARGET_SLOT,
+    CODE_ENTRY_SLOT,
+    DEBUG_TARGET_SLOT,
+    JS_RETURN_SLOT,
+    NUMBER_OF_SLOT_TYPES
+  };
+
+  void UpdateSlots(Heap* heap);
+
+  void UpdateSlotsWithFilter(Heap* heap);
+
+  SlotsBuffer* next() { return next_; }
+
+  static int SizeOfChain(SlotsBuffer* buffer) {
+    if (buffer == NULL) return 0;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
+  }
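+
+  // Worked example: a chain of three buffers whose head holds 10 entries
+  // yields SizeOfChain == 10 + 2 * 1021 == 2052; non-head buffers are
+  // counted as full, which can overestimate by one element per buffer when
+  // a typed pair did not fit at the end.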
+
+  inline bool IsFull() {
+    return idx_ == kNumberOfElements;
+  }
+
+  inline bool HasSpaceForTypedSlot() {
+    return idx_ < kNumberOfElements - 1;
+  }
+
+  static void UpdateSlotsRecordedIn(Heap* heap,
+                                    SlotsBuffer* buffer,
+                                    bool code_slots_filtering_required) {
+    while (buffer != NULL) {
+      if (code_slots_filtering_required) {
+        buffer->UpdateSlotsWithFilter(heap);
+      } else {
+        buffer->UpdateSlots(heap);
+      }
+      buffer = buffer->next();
+    }
+  }
+
+  enum AdditionMode {
+    FAIL_ON_OVERFLOW,
+    IGNORE_OVERFLOW
+  };
+
+  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+  }
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address,
+                    ObjectSlot slot,
+                    AdditionMode mode) {
+    SlotsBuffer* buffer = *buffer_address;
+    if (buffer == NULL || buffer->IsFull()) {
+      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+        allocator->DeallocateChain(buffer_address);
+        return false;
+      }
+      buffer = allocator->AllocateBuffer(buffer);
+      *buffer_address = buffer;
+    }
+    buffer->Add(slot);
+    return true;
+  }
+
+  static bool IsTypedSlot(ObjectSlot slot);
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address,
+                    SlotType type,
+                    Address addr,
+                    AdditionMode mode);
+
+  static const int kNumberOfElements = 1021;
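+  // (Presumably chosen so that the 1021 slot entries plus the three header
+  // fields below, idx_, chain_length_ and next_, total an even 1024
+  // pointer-sized words.)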
+
+ private:
+  static const int kChainLengthThreshold = 6;
+
+  intptr_t idx_;
+  intptr_t chain_length_;
+  SlotsBuffer* next_;
+  ObjectSlot slots_[kNumberOfElements];
 };
 
 
@@ -102,9 +389,6 @@
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
-
-class OverflowedObjectsScanner;
-
 class MarkCompactCollector {
  public:
   // Type of functions to compute forwarding addresses of objects in
@@ -138,13 +422,18 @@
 
   // Set the global force_compaction flag, it must be called before Prepare
   // to take effect.
-  void SetForceCompaction(bool value) {
-    force_compaction_ = value;
+  inline void SetFlags(int flags);
+
+  inline bool PreciseSweepingRequired() {
+    return sweep_precisely_;
   }
 
-
   static void Initialize();
 
+  void CollectEvacuationCandidates(PagedSpace* space);
+
+  void AddEvacuationCandidate(Page* p);
+
   // Prepares for GC by resetting relocation info in old and map spaces and
   // choosing spaces to compact.
   void Prepare(GCTracer* tracer);
@@ -152,23 +441,9 @@
   // Performs a global garbage collection.
   void CollectGarbage();
 
-  // True if the last full GC performed heap compaction.
-  bool HasCompacted() { return compacting_collection_; }
+  bool StartCompaction();
 
-  // True after the Prepare phase if the compaction is taking place.
-  bool IsCompacting() {
-#ifdef DEBUG
-    // For the purposes of asserts we don't want this to keep returning true
-    // after the collection is completed.
-    return state_ != IDLE && compacting_collection_;
-#else
-    return compacting_collection_;
-#endif
-  }
-
-  // The count of the number of objects left marked at the end of the last
-  // completed full GC (expected to be zero).
-  int previous_marked_count() { return previous_marked_count_; }
+  void AbortCompaction();
 
   // During a full GC, there is a stack-allocated GCTracer that is used for
   // bookkeeping information.  Return a pointer to that tracer.
@@ -183,29 +458,101 @@
   // Determine type of object and emit deletion log event.
   static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
-  // Returns size of a possibly marked object.
-  static int SizeOfMarkedObject(HeapObject* obj);
-
   // Distinguishable invalid map encodings (for single word and multiple words)
   // that indicate free regions.
   static const uint32_t kSingleFreeEncoding = 0;
   static const uint32_t kMultiFreeEncoding = 1;
 
+  static inline bool IsMarked(Object* obj);
+
   inline Heap* heap() const { return heap_; }
 
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
   void EnableCodeFlushing(bool enable);
 
+  enum SweeperType {
+    CONSERVATIVE,
+    LAZY_CONSERVATIVE,
+    PRECISE
+  };
+
+#ifdef DEBUG
+  void VerifyMarkbitsAreClean();
+  static void VerifyMarkbitsAreClean(PagedSpace* space);
+  static void VerifyMarkbitsAreClean(NewSpace* space);
+#endif
+
+  // Sweep a single page from the given space conservatively.
+  // Return a number of reclaimed bytes.
+  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
+    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
+        ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
+    return Page::FromAddress(reinterpret_cast<Address>(host))->
+        ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+    return Page::FromAddress(reinterpret_cast<Address>(obj))->
+        IsEvacuationCandidate();
+  }
+
+  void EvictEvacuationCandidate(Page* page) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("Page %p is too popular. Disabling evacuation.\n",
+             reinterpret_cast<void*>(page));
+    }
+
+    // TODO(gc) If all evacuation candidates are too popular we
+    // should stop slot recording entirely.
+    page->ClearEvacuationCandidate();
+
+    // We were not collecting slots on this page that point
+    // to other evacuation candidates; thus we have to
+    // rescan the page after evacuation to discover and update all
+    // pointers to evacuated objects.
+    if (page->owner()->identity() == OLD_DATA_SPACE) {
+      evacuation_candidates_.RemoveElement(page);
+    } else {
+      page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    }
+  }
+
+  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
+  void RecordCodeEntrySlot(Address slot, Code* target);
+
+  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+
+  void MigrateObject(Address dst,
+                     Address src,
+                     int size,
+                     AllocationSpace to_old_space);
+
+  bool TryPromoteObject(HeapObject* object, int object_size);
+
   inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
   inline void set_encountered_weak_maps(Object* weak_map) {
     encountered_weak_maps_ = weak_map;
   }
 
+  void InvalidateCode(Code* code);
+
+  void ClearMarkbits();
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
 
+  bool MarkInvalidatedCode();
+  void RemoveDeadInvalidatedCode();
+  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
+
 #ifdef DEBUG
   enum CollectorState {
     IDLE,
@@ -221,23 +568,26 @@
   CollectorState state_;
 #endif
 
-  // Global flag that forces a compaction.
-  bool force_compaction_;
+  // Global flag that forces sweeping to be precise, so we can traverse the
+  // heap.
+  bool sweep_precisely_;
 
-  // Global flag indicating whether spaces were compacted on the last GC.
-  bool compacting_collection_;
+  // True if we are collecting slots to perform evacuation from evacuation
+  // candidates.
+  bool compacting_;
 
-  // Global flag indicating whether spaces will be compacted on the next GC.
-  bool compact_on_next_gc_;
+  bool was_marked_incrementally_;
 
-  // The number of objects left marked at the end of the last completed full
-  // GC (expected to be zero).
-  int previous_marked_count_;
+  bool collect_maps_;
 
   // A pointer to the current stack-allocated GC tracer object during a full
   // collection (NULL before and after).
   GCTracer* tracer_;
 
+  SlotsBufferAllocator slots_buffer_allocator_;
+
+  SlotsBuffer* migration_slots_buffer_;
+
   // Finishes GC, performs heap verification if enabled.
   void Finish();
 
@@ -270,13 +620,13 @@
   // Marking operations for objects reachable from roots.
   void MarkLiveObjects();
 
-  void MarkUnmarkedObject(HeapObject* obj);
+  void AfterMarking();
 
-  inline void MarkObject(HeapObject* obj) {
-    if (!obj->IsMarked()) MarkUnmarkedObject(obj);
-  }
+  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
 
-  inline void SetMark(HeapObject* obj);
+  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+
+  void ProcessNewlyMarkedObject(HeapObject* obj);
 
   // Creates back pointers for all map transitions, stores them in
   // the prototype field.  The original prototype pointers are restored
@@ -310,18 +660,18 @@
 
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
-  void ProcessMarkingStack();
+  void ProcessMarkingDeque();
 
   // Mark objects reachable (transitively) from objects in the marking
   // stack.  This function empties the marking stack, but may leave
   // overflowed objects in the heap, in which case the marking stack's
   // overflow flag will be set.
-  void EmptyMarkingStack();
+  void EmptyMarkingDeque();
 
   // Refill the marking stack with overflowed objects from the heap.  This
   // function either leaves the marking stack full or clears the overflow
   // flag on the marking stack.
-  void RefillMarkingStack();
+  void RefillMarkingDeque();
 
   // After reachable maps have been marked process per context object
   // literal map caches removing unmarked entries.
@@ -331,21 +681,16 @@
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
 
-#ifdef DEBUG
-  void UpdateLiveObjectCount(HeapObject* obj);
-#endif
-
-  // We sweep the large object space in the same way whether we are
-  // compacting or not, because the large object space is never compacted.
-  void SweepLargeObjectSpace();
-
-  // Test whether a (possibly marked) object is a Map.
-  static inline bool SafeIsMap(HeapObject* object);
-
   // Map transitions from a live map to a dead map must be killed.
   // We replace them with a null descriptor, with the same key.
   void ClearNonLiveTransitions();
 
+  // Marking detaches initial maps from SharedFunctionInfo objects
+  // to make this reference weak. We need to reattach initial maps
+  // after collection. This is done either during the
+  // ClearNonLiveTransitions pass or by calling this function.
+  void ReattachInitialMaps();
+
   // Mark all values associated with reachable keys in weak maps encountered
   // so far.  This might push new object or even new weak maps onto the
   // marking stack.
@@ -358,164 +703,31 @@
 
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
-  // a non-compacting collection, or else computing and encoding
-  // forwarding addresses for a compacting collection.
+  // a non-compacting collection.
   //
   //  Before: Live objects are marked and non-live objects are unmarked.
   //
-  //   After: (Non-compacting collection.)  Live objects are unmarked,
-  //          non-live regions have been added to their space's free
-  //          list.
+  //   After: Live objects are unmarked, non-live regions have been added to
+  //          their space's free list. Active eden semispace is compacted by
+  //          evacuation.
   //
-  //   After: (Compacting collection.)  The forwarding address of live
-  //          objects in the paged spaces is encoded in their map word
-  //          along with their (non-forwarded) map pointer.
-  //
-  //          The forwarding address of live objects in the new space is
-  //          written to their map word's offset in the inactive
-  //          semispace.
-  //
-  //          Bookkeeping data is written to the page header of
-  //          eached paged-space page that contains live objects after
-  //          compaction:
-  //
-  //          The allocation watermark field is used to track the
-  //          relocation top address, the address of the first word
-  //          after the end of the last live object in the page after
-  //          compaction.
-  //
-  //          The Page::mc_page_index field contains the zero-based index of the
-  //          page in its space.  This word is only used for map space pages, in
-  //          order to encode the map addresses in 21 bits to free 11
-  //          bits per map word for the forwarding address.
-  //
-  //          The Page::mc_first_forwarded field contains the (nonencoded)
-  //          forwarding address of the first live object in the page.
-  //
-  //          In both the new space and the paged spaces, a linked list
-  //          of live regions is constructructed (linked through
-  //          pointers in the non-live region immediately following each
-  //          live region) to speed further passes of the collector.
-
-  // Encodes forwarding addresses of objects in compactable parts of the
-  // heap.
-  void EncodeForwardingAddresses();
-
-  // Encodes the forwarding addresses of objects in new space.
-  void EncodeForwardingAddressesInNewSpace();
-
-  // Function template to encode the forwarding addresses of objects in
-  // paged spaces, parameterized by allocation and non-live processing
-  // functions.
-  template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
-  void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
-
-  // Iterates live objects in a space, passes live objects
-  // to a callback function which returns the heap size of the object.
-  // Returns the number of live objects iterated.
-  int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
-  int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
-
-  // Iterates the live objects between a range of addresses, returning the
-  // number of live objects.
-  int IterateLiveObjectsInRange(Address start, Address end,
-                                LiveObjectCallback size_func);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.
   void SweepSpaces();
 
-  // -----------------------------------------------------------------------
-  // Phase 3: Updating pointers in live objects.
-  //
-  //  Before: Same as after phase 2 (compacting collection).
-  //
-  //   After: All pointers in live objects, including encoded map
-  //          pointers, are updated to point to their target's new
-  //          location.
+  void EvacuateNewSpace();
 
-  friend class UpdatingVisitor;  // helper for updating visited objects
+  void EvacuateLiveObjectsFromPage(Page* p);
 
-  // Updates pointers in all spaces.
-  void UpdatePointers();
+  void EvacuatePages();
 
-  // Updates pointers in an object in new space.
-  // Returns the heap size of the object.
-  int UpdatePointersInNewObject(HeapObject* obj);
+  void EvacuateNewSpaceAndCandidates();
 
-  // Updates pointers in an object in old spaces.
-  // Returns the heap size of the object.
-  int UpdatePointersInOldObject(HeapObject* obj);
-
-  // Calculates the forwarding address of an object in an old space.
-  static Address GetForwardingAddressInOldSpace(HeapObject* obj);
-
-  // -----------------------------------------------------------------------
-  // Phase 4: Relocating objects.
-  //
-  //  Before: Pointers to live objects are updated to point to their
-  //          target's new location.
-  //
-  //   After: Objects have been moved to their new addresses.
-
-  // Relocates objects in all spaces.
-  void RelocateObjects();
-
-  // Converts a code object's inline target to addresses, convention from
-  // address to target happens in the marking phase.
-  int ConvertCodeICTargetToAddress(HeapObject* obj);
-
-  // Relocate a map object.
-  int RelocateMapObject(HeapObject* obj);
-
-  // Relocates an old object.
-  int RelocateOldPointerObject(HeapObject* obj);
-  int RelocateOldDataObject(HeapObject* obj);
-
-  // Relocate a property cell object.
-  int RelocateCellObject(HeapObject* obj);
-
-  // Helper function.
-  inline int RelocateOldNonCodeObject(HeapObject* obj,
-                                      PagedSpace* space);
-
-  // Relocates an object in the code space.
-  int RelocateCodeObject(HeapObject* obj);
-
-  // Copy a new object.
-  int RelocateNewObject(HeapObject* obj);
+  void SweepSpace(PagedSpace* space, SweeperType sweeper);
 
 #ifdef DEBUG
-  // -----------------------------------------------------------------------
-  // Debugging variables, functions and classes
-  // Counters used for debugging the marking phase of mark-compact or
-  // mark-sweep collection.
-
-  // Size of live objects in Heap::to_space_.
-  int live_young_objects_size_;
-
-  // Size of live objects in Heap::old_pointer_space_.
-  int live_old_pointer_objects_size_;
-
-  // Size of live objects in Heap::old_data_space_.
-  int live_old_data_objects_size_;
-
-  // Size of live objects in Heap::code_space_.
-  int live_code_objects_size_;
-
-  // Size of live objects in Heap::map_space_.
-  int live_map_objects_size_;
-
-  // Size of live objects in Heap::cell_space_.
-  int live_cell_objects_size_;
-
-  // Size of live objects in Heap::lo_space_.
-  int live_lo_objects_size_;
-
-  // Number of live bytes in this collection.
-  int live_bytes_;
-
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
 
@@ -524,15 +736,19 @@
 #endif
 
   Heap* heap_;
-  MarkingStack marking_stack_;
+  MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
 
+  List<Page*> evacuation_candidates_;
+  List<Code*> invalidated_code_;
+
   friend class Heap;
-  friend class OverflowedObjectsScanner;
 };
 
 
+const char* AllocationSpaceName(AllocationSpace space);
+
 } }  // namespace v8::internal
 
 #endif  // V8_MARK_COMPACT_H_
diff --git a/src/math.js b/src/math.js
index b5a6d18..18492aa 100644
--- a/src/math.js
+++ b/src/math.js
@@ -189,7 +189,7 @@
 // ECMA 262 - 15.8.2.18
 function MathTan(x) {
   if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  return %Math_tan(x);
+  return %_MathTan(x);
 }
 
 
@@ -239,7 +239,7 @@
 
   // Set up non-enumerable functions of the Math object and
   // set their names.
-  InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
+  InstallFunctions($Math, DONT_ENUM, $Array(
     "random", MathRandom,
     "abs", MathAbs,
     "acos", MathAcos,
diff --git a/src/messages.cc b/src/messages.cc
index b6ad5ac..a0793c2 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -1,5 +1,4 @@
-
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -81,11 +80,11 @@
   }
 
   Handle<Object> stack_trace_handle = stack_trace.is_null()
-      ? FACTORY->undefined_value()
+      ? Handle<Object>::cast(FACTORY->undefined_value())
       : Handle<Object>::cast(stack_trace);
 
   Handle<Object> stack_frames_handle = stack_frames.is_null()
-      ? FACTORY->undefined_value()
+      ? Handle<Object>::cast(FACTORY->undefined_value())
       : Handle<Object>::cast(stack_frames);
 
   Handle<JSMessageObject> message =
@@ -127,7 +126,7 @@
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
       Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
-          FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
       Handle<Object> callback_data(listener.get(1));
       {
         // Do not allow exceptions to propagate.
@@ -149,12 +148,15 @@
           JSFunction::cast(
               Isolate::Current()->js_builtins_object()->
               GetPropertyNoExceptionThrown(*fmt_str)));
-  Object** argv[1] = { data.location() };
+  Handle<Object> argv[] = { data };
 
   bool caught_exception;
   Handle<Object> result =
       Execution::TryCall(fun,
-          Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
+                         Isolate::Current()->js_builtins_object(),
+                         ARRAY_SIZE(argv),
+                         argv,
+                         &caught_exception);
 
   if (caught_exception || !result->IsString()) {
     return FACTORY->LookupAsciiSymbol("<error>");
diff --git a/src/messages.js b/src/messages.js
index a9993af..5a3f12e 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -83,7 +83,7 @@
 // objects between script tags in a browser setting.
 function ToStringCheckErrorObject(obj) {
   if (IsNativeErrorObject(obj)) {
-    return %_CallFunction(obj, errorToString);
+    return %_CallFunction(obj, ErrorToString);
   } else {
     return ToString(obj);
   }
@@ -185,18 +185,20 @@
       "define_disallowed",            ["Cannot define property:", "%0", ", object is not extensible."],
       "non_extensible_proto",         ["%0", " is not extensible"],
       "handler_non_object",           ["Proxy.", "%0", " called with non-object as handler"],
-      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for ", "%1", " trap"],
+      "proto_non_object",             ["Proxy.", "%0", " called with non-object as prototype"],
+      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
       "handler_trap_missing",         ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
       "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
-      "handler_returned_false",       ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
-      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
-      "proxy_prop_not_configurable",  ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
-      "proxy_non_object_prop_names",  ["Trap ", "%1", " returned non-object ", "%0"],
-      "proxy_repeated_prop_name",     ["Trap ", "%1", " returned repeated property name ", "%2"],
+      "handler_returned_false",       ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
+      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
+      "proxy_prop_not_configurable",  ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
+      "proxy_non_object_prop_names",  ["Trap '", "%1", "' returned non-object ", "%0"],
+      "proxy_repeated_prop_name",     ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
       "invalid_weakmap_key",          ["Invalid value used as weak map key"],
       // RangeError
       "invalid_array_length",         ["Invalid array length"],
       "stack_overflow",               ["Maximum call stack size exceeded"],
+      "invalid_time_value",           ["Invalid time value"],
       // SyntaxError
       "unable_to_parse",              ["Parse error"],
       "invalid_regexp_flags",         ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
@@ -204,6 +206,7 @@
       "illegal_break",                ["Illegal break statement"],
       "illegal_continue",             ["Illegal continue statement"],
       "illegal_return",               ["Illegal return statement"],
+      "illegal_let",                  ["Illegal let declaration outside extended mode"],
       "error_loading_debugger",       ["Error loading debugger"],
       "no_input_to_regexp",           ["No input to ", "%0"],
       "invalid_json",                 ["String '", "%0", "' is not valid JSON"],
@@ -240,20 +243,26 @@
       "strict_poison_pill",           ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
       "strict_caller",                ["Illegal access to a strict mode caller function."],
       "unprotected_let",              ["Illegal let declaration in unprotected statement context."],
+      "unprotected_const",            ["Illegal const declaration in unprotected statement context."],
       "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
       "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
     ];
     var messages = { __proto__ : null };
-    var desc = new PropertyDescriptor();
-    desc.setConfigurable(false);
-    desc.setEnumerable(false);
-    desc.setWritable(false);
     for (var i = 0; i < messagesDictionary.length; i += 2) {
       var key = messagesDictionary[i];
       var format = messagesDictionary[i + 1];
-      ObjectFreeze(format);
-      desc.setValue(format);
-      DefineOwnProperty(messages, key, desc);
+
+      for (var j = 0; j < format.length; j++) {
+        %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
+                                        DONT_DELETE | READ_ONLY | DONT_ENUM);
+      }
+      %IgnoreAttributesAndSetProperty(format, 'length', format.length,
+                                      DONT_DELETE | READ_ONLY | DONT_ENUM);
+      %PreventExtensions(format);
+      %IgnoreAttributesAndSetProperty(messages,
+                                      key,
+                                      format,
+                                      DONT_DELETE | DONT_ENUM | READ_ONLY);
     }
     %PreventExtensions(messages);
     %IgnoreAttributesAndSetProperty(builtins, "kMessages",
@@ -386,7 +395,7 @@
   }
 
   return new SourceLocation(this, position, line, column, start, end);
-};
+}
 
 
 /**
@@ -416,7 +425,7 @@
   // resource.
   var column = opt_column || 0;
   if (line == 0) {
-    column -= this.column_offset
+    column -= this.column_offset;
   }
 
   var offset_position = opt_offset_position || 0;
@@ -431,7 +440,8 @@
       return null;
     }
 
-    return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
+    return this.locationFromPosition(
+        this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
   }
 }
 
@@ -447,8 +457,10 @@
  *     invalid
  */
 function ScriptSourceSlice(opt_from_line, opt_to_line) {
-  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
-  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line
+  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
+                                              : opt_from_line;
+  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
+                                          : opt_to_line;
 
   // Adjust according to the offset within the resource.
   from_line -= this.line_offset;
@@ -468,8 +480,10 @@
   var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
 
   // Return a source slice with line numbers re-adjusted to the resource.
-  return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
-                         from_position, to_position);
+  return new SourceSlice(this,
+                         from_line + this.line_offset,
+                         to_line + this.line_offset,
+                         from_position, to_position);
 }
 
 
@@ -502,7 +516,7 @@
 function ScriptLineCount() {
   // Return number of source lines.
   return this.line_ends.length;
-};
+}
 
 
 /**
@@ -567,10 +581,10 @@
  *   position : position within the source
  *   start    : position of start of source context (inclusive)
  *   end      : position of end of source context (not inclusive)
- * Source text for the source context is the character interval [start, end[. In
- * most cases end will point to a newline character. It might point just past
- * the final position of the source if the last source line does not end with a
- * newline character.
+ * Source text for the source context is the character interval
+ * [start, end[. In most cases end will point to a newline character.
+ * It might point just past the final position of the source if the last
+ * source line does not end with a newline character.
  * @param {Script} script The Script object for which this is a location
  * @param {number} position Source position for the location
  * @param {number} line The line number for the location
@@ -637,7 +651,7 @@
       this.end = this.start + limit;
     }
   }
-};
+}
 
 
 /**
@@ -646,8 +660,11 @@
  *     Source text for this location.
  */
 function SourceLocationSourceText() {
-  return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
-};
+  return %_CallFunction(this.script.source,
+                        this.start,
+                        this.end,
+                        StringSubstring);
+}
 
 
 SetUpLockedPrototype(SourceLocation,
@@ -655,7 +672,7 @@
   $Array(
     "restrict", SourceLocationRestrict,
     "sourceText", SourceLocationSourceText
-  )
+ )
 );
 
 
@@ -695,7 +712,7 @@
                         this.from_position,
                         this.to_position,
                         StringSubstring);
-};
+}
 
 SetUpLockedPrototype(SourceSlice,
   $Array("script", "from_line", "to_line", "from_position", "to_position"),
@@ -742,12 +759,8 @@
     hasBeenSet = true;
     value = v;
   }
-  var desc = { get: getter,
-               set: setter,
-               enumerable: false,
-               configurable: true };
-  desc = ToPropertyDescriptor(desc);
-  DefineOwnProperty(obj, name, desc, true);
+  %DefineOrRedefineAccessorProperty(obj, name, GETTER, getter, DONT_ENUM);
+  %DefineOrRedefineAccessorProperty(obj, name, SETTER, setter, DONT_ENUM);
 }
 
 function CallSite(receiver, fun, pos) {
@@ -758,7 +771,7 @@
 
 function CallSiteGetThis() {
   return this.receiver;
-};
+}
 
 function CallSiteGetTypeName() {
   var constructor = this.receiver.constructor;
@@ -770,33 +783,33 @@
     return %_CallFunction(this.receiver, ObjectToString);
   }
   return constructorName;
-};
+}
 
 function CallSiteIsToplevel() {
   if (this.receiver == null) {
     return true;
   }
   return IS_GLOBAL(this.receiver);
-};
+}
 
 function CallSiteIsEval() {
   var script = %FunctionGetScript(this.fun);
   return script && script.compilation_type == COMPILATION_TYPE_EVAL;
-};
+}
 
 function CallSiteGetEvalOrigin() {
   var script = %FunctionGetScript(this.fun);
   return FormatEvalOrigin(script);
-};
+}
 
 function CallSiteGetScriptNameOrSourceURL() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.nameOrSourceURL() : null;
-};
+}
 
 function CallSiteGetFunction() {
   return this.fun;
-};
+}
 
 function CallSiteGetFunctionName() {
   // See if the function knows its own name
@@ -812,15 +825,19 @@
     return "eval";
   }
   return null;
-};
+}
 
 function CallSiteGetMethodName() {
   // See if we can find a unique property on the receiver that holds
   // this function.
   var ownName = this.fun.name;
   if (ownName && this.receiver &&
-      (%_CallFunction(this.receiver, ownName, ObjectLookupGetter) === this.fun ||
-       %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun ||
+      (%_CallFunction(this.receiver,
+                      ownName,
+                      ObjectLookupGetter) === this.fun ||
+       %_CallFunction(this.receiver,
+                      ownName,
+                      ObjectLookupSetter) === this.fun ||
        this.receiver[ownName] === this.fun)) {
     // To handle DontEnum properties we guess that the method has
     // the same name as the function.
@@ -830,7 +847,8 @@
   for (var prop in this.receiver) {
     if (this.receiver.__lookupGetter__(prop) === this.fun ||
         this.receiver.__lookupSetter__(prop) === this.fun ||
-        (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
+        (!this.receiver.__lookupGetter__(prop) &&
+         this.receiver[prop] === this.fun)) {
       // If we find more than one match bail out to avoid confusion.
       if (name) {
         return null;
@@ -842,12 +860,12 @@
     return name;
   }
   return null;
-};
+}
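
The lookup above amounts to scanning the receiver for exactly one property
that holds the frame's function, bailing out on ambiguity. A simplified
JavaScript sketch (illustrative; it ignores the __lookupGetter__ and
__lookupSetter__ cases the real code handles):

    function methodNameSketch(receiver, fun) {
      var name = null;
      for (var prop in receiver) {
        if (receiver[prop] === fun) {
          if (name) return null;  // more than one match: give up
          name = prop;
        }
      }
      return name;
    }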
 
 function CallSiteGetFileName() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.name : null;
-};
+}
 
 function CallSiteGetLineNumber() {
   if (this.pos == -1) {
@@ -859,7 +877,7 @@
     location = script.locationFromPosition(this.pos, true);
   }
   return location ? location.line + 1 : null;
-};
+}
 
 function CallSiteGetColumnNumber() {
   if (this.pos == -1) {
@@ -871,16 +889,16 @@
     location = script.locationFromPosition(this.pos, true);
   }
   return location ? location.column + 1: null;
-};
+}
 
 function CallSiteIsNative() {
   var script = %FunctionGetScript(this.fun);
   return script ? (script.type == TYPE_NATIVE) : false;
-};
+}
 
 function CallSiteGetPosition() {
   return this.pos;
-};
+}
 
 function CallSiteIsConstructor() {
   var constructor = this.receiver ? this.receiver.constructor : null;
@@ -888,7 +906,7 @@
     return false;
   }
   return this.fun === constructor;
-};
+}
 
 SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getThis", CallSiteGetThis,
@@ -931,12 +949,13 @@
       // eval script originated from "real" source.
       if (eval_from_script.name) {
         eval_origin += " (" + eval_from_script.name;
-        var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
+        var location = eval_from_script.locationFromPosition(
+            script.eval_from_script_position, true);
         if (location) {
           eval_origin += ":" + (location.line + 1);
           eval_origin += ":" + (location.column + 1);
         }
-        eval_origin += ")"
+        eval_origin += ")";
       } else {
         eval_origin += " (unknown source)";
       }
@@ -944,7 +963,7 @@
   }
 
   return eval_origin;
-};
+}
 
 function FormatSourcePosition(frame) {
   var fileName;
@@ -953,8 +972,9 @@
     fileLocation = "native";
   } else if (frame.isEval()) {
     fileName = frame.getScriptNameOrSourceURL();
-    if (!fileName)
+    if (!fileName) {
       fileLocation = frame.getEvalOrigin();
+    }
   } else {
     fileName = frame.getFileName();
   }
@@ -1063,7 +1083,7 @@
   DefineOneShotAccessor(obj, 'stack', function (obj) {
     return FormatRawStackTrace(obj, raw_stack);
   });
-};
+}
 
 
 function SetUpError() {
@@ -1126,6 +1146,7 @@
         return new f(m);
       }
     });
+    %SetNativeFlag(f);
   }
 
   DefineError(function Error() { });
@@ -1143,42 +1164,43 @@
 
 %SetProperty($Error.prototype, 'message', '', DONT_ENUM);
 
-// Global list of error objects visited during errorToString. This is
+// Global list of error objects visited during ErrorToString. This is
 // used to detect cycles in error toString formatting.
 const visited_errors = new InternalArray();
 const cyclic_error_marker = new $Object();
 
-function errorToStringDetectCycle(error) {
+function ErrorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
     var type = error.type;
+    var name = error.name;
+    name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
+    var message = error.message;
     var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
     if (type && !hasMessage) {
-      var formatted = FormatMessage(%NewMessageObject(type, error.arguments));
-      return error.name + ": " + formatted;
+      message = FormatMessage(%NewMessageObject(type, error.arguments));
     }
-    var message = hasMessage ? (": " + error.message) : "";
-    return error.name + message;
+    message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
+    if (name === "") return message;
+    if (message === "") return name;
+    return name + ": " + message;
   } finally {
     visited_errors.length = visited_errors.length - 1;
   }
 }
 
-function errorToString() {
+function ErrorToString() {
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
     throw MakeTypeError("called_on_null_or_undefined",
                         ["Error.prototype.toString"]);
   }
-  // This helper function is needed because access to properties on
-  // the builtins object do not work inside of a catch clause.
-  function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
 
   try {
-    return errorToStringDetectCycle(this);
+    return ErrorToStringDetectCycle(this);
   } catch(e) {
     // If this error message was encountered already return the empty
     // string for it instead of recursively formatting it.
-    if (isCyclicErrorMarker(e)) {
+    if (e === cyclic_error_marker) {
       return '';
     }
     throw e;
@@ -1186,7 +1208,7 @@
 }
 
 
-InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
+InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
 
 // Boilerplate for exceptions for stack overflows. Used from
 // Isolate::StackOverflow().
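
The renamed ErrorToStringDetectCycle above combines name and message per the
ES5.1 Error.prototype.toString algorithm. A standalone JavaScript sketch of
just the combination rules (cycle guard and message-template formatting
elided):

    function errorToStringSketch(error) {
      var name = error.name === undefined ? "Error" : String(error.name);
      var message = error.message === undefined ? "" : String(error.message);
      if (name === "") return message;
      if (message === "") return name;
      return name + ": " + message;
    }
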
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index c4c4fd2..2ba9760 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -78,7 +78,6 @@
 }
 
 
-
 // -----------------------------------------------------------------------------
 // RelocInfo.
 
@@ -117,9 +116,14 @@
 }
 
 
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -146,9 +150,15 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -176,10 +186,17 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
+  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -200,6 +217,11 @@
   // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
   // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
   Assembler::set_target_address_at(pc_, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -242,12 +264,7 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    Object** p = target_object_address();
-    Object* orig = *p;
-    visitor->VisitPointer(p);
-    if (*p != orig) {
-      set_target_object(*p);
-    }
+    visitor->VisitEmbeddedPointer(this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -257,9 +274,9 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
-               IsPatchedReturnSequence()) ||
-              (RelocInfo::IsDebugBreakSlot(mode) &&
-               IsPatchedDebugBreakSlotSequence())) &&
+              IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence())) &&
              Isolate::Current()->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
 #endif
@@ -273,7 +290,7 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index e01a0ca..e933181 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -74,7 +74,9 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+                                CpuFeaturesImpliedByCompiler());
+  ASSERT(supported_ == 0 || supported_ == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -82,8 +84,7 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
   // snapshot.
-  supported_ |= OS::CpuFeaturesImpliedByPlatform();
-  supported_ |= CpuFeaturesImpliedByCompiler();
+  supported_ |= standard_features;
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -2018,7 +2019,8 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2041,7 +2043,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 38e9537..b66ea0d 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -302,7 +302,7 @@
 const FPURegister f30 = { 30 };
 const FPURegister f31 = { 31 };
 
-const FPURegister kDoubleRegZero = f28;
+static const FPURegister& kDoubleRegZero = f28;
 
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index d772304..98fd57d 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -88,12 +88,6 @@
 }
 
 
-// This constant has the same value as JSArray::kPreallocatedArrayElements and
-// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
-// below should be reconsidered.
-static const int kLoopUnfoldLimit = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. An elements backing store is allocated with size initial_capacity
 // and filled with the hole values.
@@ -103,16 +97,19 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity > 0);
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
   // Load the initial map from the array function.
   __ lw(scratch1, FieldMemOperand(array_function,
                                   JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
-  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
   __ AllocateInNewSpace(size,
                         result,
                         scratch2,
@@ -131,6 +128,11 @@
   __ mov(scratch3,  zero_reg);
   __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
+  if (initial_capacity == 0) {
+    __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+    return;
+  }
+
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
@@ -147,21 +149,31 @@
   // scratch1: elements array (untagged)
   // scratch2: start of next object
   __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
   __ sw(scratch3, MemOperand(scratch1));
   __ Addu(scratch1, scratch1, kPointerSize);
   __ li(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
   __ sw(scratch3, MemOperand(scratch1));
   __ Addu(scratch1, scratch1, kPointerSize);
 
-  // Fill the FixedArray with the hole value.
-  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
   __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < initial_capacity; i++) {
+  static const int kLoopUnfoldLimit = 4;
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    for (int i = 0; i < initial_capacity; i++) {
+      __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
+    }
+  } else {
+    Label loop, entry;
+    __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
+    __ Branch(&entry);
+    __ bind(&loop);
     __ sw(scratch3, MemOperand(scratch1));
     __ Addu(scratch1, scratch1, kPointerSize);
+    __ bind(&entry);
+    __ Branch(&loop, lt, scratch1, Operand(scratch2));
   }
 }
 
@@ -177,7 +189,7 @@
 // register elements_array_storage is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array_storage,
                             Register elements_array_end,
@@ -185,31 +197,18 @@
                             Register scratch2,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
-
   // Load the initial map from the array function.
   __ lw(elements_array_storage,
          FieldMemOperand(array_function,
                          JSFunction::kPrototypeOrInitialMapOffset));
 
-  // Check whether an empty sized array is requested.
-  __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize +
-             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch1,
-                        gc_required,
-                        TAG_OBJECT);
-  __ Branch(&allocated);
+  if (FLAG_debug_code) {  // Assert that array size is not zero.
+    __ Assert(
+        ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
+  }
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
-  __ bind(&not_empty);
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ li(elements_array_end,
         (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
@@ -228,7 +227,6 @@
   // result: JSObject
   // elements_array_storage: initial map
   // array_size: size of array (smi)
-  __ bind(&allocated);
   __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
   __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
   __ sw(elements_array_storage,
@@ -262,8 +260,6 @@
   // the actual JSArray has length 0 and the size of the JSArray for non-empty
   // JSArrays. The length of a FixedArray is stored as a smi.
   STATIC_ASSERT(kSmiTag == 0);
-  __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-  __ movz(array_size, at, array_size);
 
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ sw(array_size, MemOperand(elements_array_storage));
@@ -312,18 +308,18 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more;
+  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
 
   // Check for array construction with zero arguments or one.
   __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
   // Handle construction of an empty array.
+  __ bind(&empty_array);
   AllocateEmptyJSArray(masm,
                        a1,
                        a2,
                        a3,
                        t0,
                        t1,
-                       JSArray::kPreallocatedArrayElements,
                        call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
   // Setup return value, remove receiver from stack and return.
@@ -338,6 +334,12 @@
 
   STATIC_ASSERT(kSmiTag == 0);
   __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
+  __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
+  __ Drop(1);  // Adjust stack.
+  __ mov(a0, zero_reg);  // Treat this as a call with argc of zero.
+  __ Branch(&empty_array);
+
+  __ bind(&not_empty_array);
   __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
   __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
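
Behaviorally this change is neutral: Array(0) now takes the same allocation
path as Array(), skipping the preallocated elements store, while the
script-visible results are unchanged:

    new Array().length;    // 0
    new Array(0).length;   // 0
    0 in new Array(0);     // false -- no element slots are allocated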
 
@@ -587,10 +589,11 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
-  __ EnterInternalFrame();
-  __ push(v0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(v0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
   __ pop(function);
   __ mov(argument, v0);
   __ Branch(&argument_is_string);
@@ -606,10 +609,11 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
-  __ EnterInternalFrame();
-  __ push(argument);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ Ret();
 }
 
@@ -622,13 +626,12 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that the function is not a smi.
-  __ And(t0, a1, Operand(kSmiTagMask));
-  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(a1, &non_function_call);
   // Check that the function is a JSFunction.
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
 
   // Jump to the function-specific construct stub.
   __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -638,13 +641,21 @@
 
   // a0: number of arguments
   // a1: called object
+  // a2: object type
+  Label do_call;
+  __ bind(&slow);
+  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // CALL_NON_FUNCTION expects the non-function constructor as receiver
   // (instead of the original receiver from the call site). The receiver is
   // stack element argc.
   // Set expected number of arguments to zero (not changing a0).
   __ mov(a2, zero_reg);
-  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ SetCallKind(t1, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
@@ -667,331 +678,334 @@
   // -----------------------------------
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Preserve the two incoming parameters on the stack.
-  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
-  __ MultiPushReversed(a0.bit() | a1.bit());
+    // Preserve the two incoming parameters on the stack.
+    __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+    __ MultiPushReversed(a0.bit() | a1.bit());
 
-  // Use t7 to hold undefined, which is used in several places below.
-  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+    // Use t7 to hold undefined, which is used in several places below.
+    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
 
-  Label rt_call, allocated;
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    Label rt_call, allocated;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(isolate);
-    __ li(a2, Operand(debug_step_in_fp));
-    __ lw(a2, MemOperand(a2));
-    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ li(a2, Operand(debug_step_in_fp));
+      __ lw(a2, MemOperand(a2));
+      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
 #endif
 
-    // Load the initial map and verify that it is in fact a map.
-    // a1: constructor function
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ And(t0, a2, Operand(kSmiTagMask));
-    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
-    __ GetObjectType(a2, a3, t4);
-    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+      // Load the initial map and verify that it is in fact a map.
+      // a1: constructor function
+      __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(a2, &rt_call);
+      __ GetObjectType(a2, a3, t4);
+      __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // a1: constructor function
-    // a2: initial map
-    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+      __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-      MemOperand constructor_count =
-         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
-      __ lbu(t0, constructor_count);
-      __ Subu(t0, t0, Operand(1));
-      __ sb(t0, constructor_count);
-      __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
-      __ Push(a1, a2);
-
-      __ push(a1);  // Constructor.
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(a2);
-      __ pop(a1);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // a1: constructor function
-    // a2: initial map
-    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
-
-    // Allocated the JSObject, now initialize the fields. Map is set to initial
-    // map and properties and elements are set to empty fixed array.
-    // a1: constructor function
-    // a2: initial map
-    // a3: object size
-    // t4: JSObject (not tagged)
-    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(t5, t4);
-    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-    __ Addu(t5, t5, Operand(3*kPointerSize));
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-    // Fill all the in-object properties with appropriate filler.
-    // a1: constructor function
-    // a2: initial map
-    // a3: object size (in words)
-    // t4: JSObject (not tagged)
-    // t5: First in-object property of JSObject (not tagged)
-    __ sll(t0, a3, kPointerSizeLog2);
-    __ addu(t6, t4, t0);   // End of object.
-    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    { Label loop, entry;
       if (count_constructions) {
+        Label allocate;
+        // Decrease generous allocation count.
+        __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+        MemOperand constructor_count =
+           FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+        __ lbu(t0, constructor_count);
+        __ Subu(t0, t0, Operand(1));
+        __ sb(t0, constructor_count);
+        __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+        __ Push(a1, a2);
+
+        __ push(a1);  // Constructor.
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(a2);
+        __ pop(a1);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+      __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject, now initialize the fields. Map is set to
+      // initial map and properties and elements are set to empty fixed array.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size
+      // t4: JSObject (not tagged)
+      __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(t5, t4);
+      __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+      __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+      __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+      __ Addu(t5, t5, Operand(3*kPointerSize));
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+      // Fill all the in-object properties with appropriate filler.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size (in words)
+      // t4: JSObject (not tagged)
+      // t5: First in-object property of JSObject (not tagged)
+      __ sll(t0, a3, kPointerSizeLog2);
+      __ addu(t6, t4, t0);   // End of object.
+      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      if (count_constructions) {
+        __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+        __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+               kBitsPerByte);
+        __ sll(t0, a0, kPointerSizeLog2);
+        __ addu(a0, t5, t0);
+        // a0: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ Assert(le, "Unexpected number of pre-allocated property fields.",
+              a0, Operand(t6));
+        }
+        __ InitializeFieldsWithFiller(t5, a0, t7);
         // To allow for truncation.
         __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
       }
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ sw(t7, MemOperand(t5, 0));
-      __ addiu(t5, t5, kPointerSize);
-      __ bind(&entry);
-      __ Branch(&loop, Uless, t5, Operand(t6));
+      __ InitializeFieldsWithFiller(t5, t6, t7);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+      // a1: constructor function
+      // t4: JSObject
+      // t5: start of next object (not tagged)
+      __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+      // The field instance sizes contains both pre-allocated property fields
+      // and in-object properties.
+      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+      __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+             kBitsPerByte);
+      __ Addu(a3, a3, Operand(t6));
+      __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+             kBitsPerByte);
+      __ subu(a3, a3, t6);
+
+      // Done if no extra properties are to be allocated.
+      __ Branch(&allocated, eq, a3, Operand(zero_reg));
+      __ Assert(greater_equal, "Property allocation count failed.",
+          a3, Operand(zero_reg));
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // a1: constructor
+      // a3: number of elements in properties array
+      // t4: JSObject
+      // t5: start of next object
+      __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ AllocateInNewSpace(
+          a0,
+          t5,
+          t6,
+          a2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // a1: constructor
+      // a3: number of elements in properties array (un-tagged)
+      // t4: JSObject
+      // t5: start of next object
+      __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+      __ mov(a2, t5);
+      __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+      __ sll(a0, a3, kSmiTagSize);
+      __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+      __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+      // Initialize the fields to undefined.
+      // a1: constructor
+      // a2: First element of FixedArray (not tagged)
+      // a3: number of elements in properties array
+      // t4: JSObject
+      // t5: FixedArray (not tagged)
+      __ sll(t3, a3, kPointerSizeLog2);
+      __ addu(t6, a2, t3);  // End of object.
+      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+          __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+        }
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ sw(t7, MemOperand(a2));
+        __ addiu(a2, a2, kPointerSize);
+        __ bind(&entry);
+        __ Branch(&loop, less, a2, Operand(t6));
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject.
+      // a1: constructor function
+      // t4: JSObject
+      // t5: FixedArray (not tagged)
+      __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated.
+      // a1: constructor function
+      // t4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // t4: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(t4, t5);
     }
 
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    __ Addu(t4, t4, Operand(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed. Continue with allocated
-    // object if not fall through to runtime call if it is.
+    __ bind(&rt_call);
+    // Allocate the new receiver object using the runtime call.
     // a1: constructor function
+    __ push(a1);  // Argument for Runtime_NewObject.
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(t4, v0);
+
+    // Receiver for constructor call allocated.
     // t4: JSObject
-    // t5: start of next object (not tagged)
-    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-    // The field instance sizes contains both pre-allocated property fields and
-    // in-object properties.
-    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-    __ And(t6,
-           a0,
-           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
-    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
-    __ Addu(a3, a3, Operand(t0));
-    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
-    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
-    __ subu(a3, a3, t0);
+    __ bind(&allocated);
+    __ push(t4);
 
-    // Done if no extra properties are to be allocated.
-    __ Branch(&allocated, eq, a3, Operand(zero_reg));
-    __ Assert(greater_equal, "Property allocation count failed.",
-        a3, Operand(zero_reg));
+    // Push the function and the allocated receiver from the stack.
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ lw(a1, MemOperand(sp, kPointerSize));
+    __ MultiPushReversed(a1.bit() | t4.bit());
 
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // a1: constructor
-    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: start of next object
-    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
-        a0,
-        t5,
-        t6,
-        a2,
-        &undo_allocation,
-        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+    // Reload the number of arguments from the stack.
+    // a1: constructor function
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ lw(a3, MemOperand(sp, 4 * kPointerSize));
 
-    // Initialize the FixedArray.
-    // a1: constructor
-    // a3: number of elements in properties array (un-tagged)
-    // t4: JSObject
-    // t5: start of next object
-    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
-    __ mov(a2, t5);
-    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
-    __ sll(a0, a3, kSmiTagSize);
-    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
-    __ Addu(a2, a2, Operand(2 * kPointerSize));
+    // Setup pointer to last argument.
+    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+    // Setup number of arguments for function call below.
+    __ srl(a0, a3, kSmiTagSize);
 
-    // Initialize the fields to undefined.
-    // a1: constructor
-    // a2: First element of FixedArray (not tagged)
-    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ sll(t3, a3, kPointerSizeLog2);
-    __ addu(t6, a2, t3);  // End of object.
-    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-    { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
-        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
-      }
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ sw(t7, MemOperand(a2));
-      __ addiu(a2, a2, kPointerSize);
-      __ bind(&entry);
-      __ Branch(&loop, less, a2, Operand(t6));
+    // Copy arguments and receiver to the expression stack.
+    // a0: number of arguments
+    // a1: constructor function
+    // a2: address of last argument (caller sp)
+    // a3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(t0, a2, Operand(t0));
+    __ lw(t1, MemOperand(t0));
+    __ push(t1);
+    __ bind(&entry);
+    __ Addu(a3, a3, Operand(-2));
+    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+    // Call the function.
+    // a0: number of arguments
+    // a1: constructor function
+    if (is_api_function) {
+      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected,
+                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject.
-    // a1: constructor function
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+    // Pop the function from the stack.
+    // v0: result
+    // sp[0]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ Pop();
 
-    // Continue with JSObject being successfully allocated.
-    // a1: constructor function
-    // a4: JSObject
-    __ jmp(&allocated);
+    // Restore context from the frame.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // t4: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(t4, t5);
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(v0, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ GetObjectType(v0, a3, a3);
+    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ lw(v0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
   }
 
-  __ bind(&rt_call);
-  // Allocate the new receiver object using the runtime call.
-  // a1: constructor function
-  __ push(a1);  // Argument for Runtime_NewObject.
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(t4, v0);
-
-  // Receiver for constructor call allocated.
-  // t4: JSObject
-  __ bind(&allocated);
-  __ push(t4);
-
-  // Push the function and the allocated receiver from the stack.
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, kPointerSize));
-  __ MultiPushReversed(a1.bit() | t4.bit());
-
-  // Reload the number of arguments from the stack.
-  // a1: constructor function
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
-
-  // Setup pointer to last argument.
-  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Setup number of arguments for function call below.
-  __ srl(a0, a3, kSmiTagSize);
-
-  // Copy arguments and receiver to the expression stack.
-  // a0: number of arguments
-  // a1: constructor function
-  // a2: address of last argument (caller sp)
-  // a3: number of arguments (smi-tagged)
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  Label loop, entry;
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
-  __ lw(t1, MemOperand(t0));
-  __ push(t1);
-  __ bind(&entry);
-  __ Addu(a3, a3, Operand(-2));
-  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
-
-  // Call the function.
-  // a0: number of arguments
-  // a1: constructor function
-  if (is_api_function) {
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Pop the function from the stack.
-  // v0: result
-  // sp[0]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ Pop();
-
-  // Restore context from the frame.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  // v0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ And(t0, v0, Operand(kSmiTagMask));
-  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ GetObjectType(v0, a3, a3);
-  __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ lw(v0, MemOperand(sp));
-
-  // Remove receiver from the stack, remove caller arguments, and
-  // return.
-  __ bind(&exit);
-  // v0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-  __ LeaveConstructFrame();
   __ sll(t0, a1, kPointerSizeLog2 - 1);
   __ Addu(sp, sp, t0);
   __ Addu(sp, sp, kPointerSize);
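
The use_receiver/exit paths implement the ECMA-262 section 13.2.2 rule the
comment cites: a constructor's return value replaces the freshly allocated
receiver only when it is an object. In plain JavaScript:

    function ReturnsObject()    { this.x = 1; return { y: 2 }; }
    function ReturnsPrimitive() { this.x = 1; return 42; }
    new ReturnsObject().y;     // 2  -- the returned object is the result
    new ReturnsPrimitive().x;  // 1  -- primitive ignored; receiver is used
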
@@ -1031,59 +1045,61 @@
   __ mov(cp, zero_reg);
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Set up the context from the function argument.
-  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Set up the context from the function argument.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
-  // Set up the roots register.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
-  __ li(s6, Operand(roots_address));
+    // Set up the roots register.
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(masm->isolate());
+    __ li(s6, Operand(roots_array_start));
 
-  // Push the function and the receiver onto the stack.
-  __ Push(a1, a2);
+    // Push the function and the receiver onto the stack.
+    __ Push(a1, a2);
 
-  // Copy arguments to the stack in a loop.
-  // a3: argc
-  // s0: argv, ie points to first arg
-  Label loop, entry;
-  __ sll(t0, a3, kPointerSizeLog2);
-  __ addu(t2, s0, t0);
-  __ b(&entry);
-  __ nop();   // Branch delay slot nop.
-  // t2 points past last arg.
-  __ bind(&loop);
-  __ lw(t0, MemOperand(s0));  // Read next parameter.
-  __ addiu(s0, s0, kPointerSize);
-  __ lw(t0, MemOperand(t0));  // Dereference handle.
-  __ push(t0);  // Push parameter.
-  __ bind(&entry);
-  __ Branch(&loop, ne, s0, Operand(t2));
+    // Copy arguments to the stack in a loop.
+    // a3: argc
+    // s0: argv, i.e. points to first arg
+    Label loop, entry;
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t2, s0, t0);
+    __ b(&entry);
+    __ nop();   // Branch delay slot nop.
+    // t2 points past last arg.
+    __ bind(&loop);
+    __ lw(t0, MemOperand(s0));  // Read next parameter.
+    __ addiu(s0, s0, kPointerSize);
+    __ lw(t0, MemOperand(t0));  // Dereference handle.
+    __ push(t0);  // Push parameter.
+    __ bind(&entry);
+    __ Branch(&loop, ne, s0, Operand(t2));
 
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ mov(s1, t0);
-  __ mov(s2, t0);
-  __ mov(s3, t0);
-  __ mov(s4, t0);
-  __ mov(s5, t0);
-  // s6 holds the root address. Do not clobber.
-  // s7 is cp. Do not init.
+    // Initialize all JavaScript callee-saved registers, since they will be
+    // seen by the garbage collector as part of handlers.
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, t0);
+    __ mov(s2, t0);
+    __ mov(s3, t0);
+    __ mov(s4, t0);
+    __ mov(s5, t0);
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
 
-  // Invoke the code and pass argc as a0.
-  __ mov(a0, a3);
-  if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code and pass argc as a0.
+    __ mov(a0, a3);
+    if (is_construct) {
+      __ Call(masm->isolate()->builtins()->JSConstructCall());
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Leave internal frame.
   }
 
-  __ LeaveInternalFrame();
-
   __ Jump(ra);
 }
 
@@ -1100,27 +1116,28 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(a1);
-  // Push call kind information.
-  __ push(t1);
+    // Preserve the function.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(a1);
-  // Call the runtime function.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  // Calculate the entry point.
-  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(a1);
+    // Call the runtime function.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
+    // Calculate the entry point.
+    __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
 
-  // Restore call kind information.
-  __ pop(t1);
-  // Restore saved function.
-  __ pop(a1);
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore saved function.
+    __ pop(a1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down temporary frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
@@ -1129,50 +1146,120 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(a1);
-  // Push call kind information.
-  __ push(t1);
+    // Preserve the function.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(a1);
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
-  // Calculate the entry point.
-  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(a1);
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
+    // Calculate the entry point.
+    __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(t1);
-  // Restore saved function.
-  __ pop(a1);
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore saved function.
+    __ pop(a1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down temporary frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
 }
 
 
-// These functions are called from C++ but cannot be used in live code.
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
+
+  // Get the full codegen state from the stack and untag it -> t2.
+  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(t2);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ Branch(&with_tos_register,
+            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+
+  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
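
On return from Runtime::kNotifyDeoptimized, the deoptimizer has left a Smi-tagged FullCodeGenerator state on the stack, optionally followed by a saved top-of-stack value. The branches above amount to the following dispatch (a hedged C++ sketch; 32-bit Smis are value << 1 with a zero tag bit):

#include <stdint.h>

enum State { NO_REGISTERS = 0, TOS_REG = 1 };  // FullCodeGenerator states.

// Returns the adjusted stack pointer; *v0 is reloaded in the TOS_REG case.
static intptr_t* NotifyDeoptimizedEpilogue(intptr_t* sp, intptr_t* v0) {
  State state = static_cast<State>(sp[0] >> 1);  // SmiUntag(t2).
  if (state == NO_REGISTERS) return sp + 1;      // Remove the state word.
  *v0 = sp[1];                                   // Reload the saved TOS value.
  return sp + 2;                                 // Remove state and value.
}
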
+
+
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection, which allows us to save and
+  // restore the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  RegList saved_regs =
+      (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
+  __ MultiPush(saved_regs);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
+  __ MultiPop(saved_regs);
+  __ Ret();
 }
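
RegList is a plain bitmask over register codes, so the save set above is a set union minus sp. A self-contained sketch (register numbers follow the MIPS o32 convention: v0 = $2, s0 = $16, sp = $29, fp = $30, ra = $31; the *Saved masks here are illustrative, not v8's actual values):

#include <stdint.h>

typedef uint32_t RegList;
static inline RegList Bit(int code) { return 1u << code; }

int main() {
  RegList kJSCallerSaved = Bit(2) | Bit(3) | Bit(4) | Bit(5);  // v0,v1,a0,a1.
  RegList kCalleeSaved   = Bit(16) | Bit(17) | Bit(18);        // s0,s1,s2.
  RegList saved_regs =
      (kJSCallerSaved | kCalleeSaved | Bit(31) | Bit(30)) & ~Bit(29);
  return (saved_regs & Bit(29)) ? 1 : 0;  // sp is never in the mask.
}
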
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  __ Abort("Call to unimplemented function in builtins-mips.cc");
+  CpuFeatures::TryForceFeatureScope scope(FPU);
+  if (!CpuFeatures::IsSupported(FPU)) {
+    __ Abort("Unreachable code: Cannot optimize without FPU support.");
+    return;
+  }
+
+  // Lookup the function in the JavaScript frame and push it as an
+  // argument to the on-stack replacement function.
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
+
+  // If the result was -1, it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+
+  // Untag the AST id and push it on the stack.
+  __ SmiUntag(v0);
+  __ push(v0);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
 }
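
Runtime::kCompileForOnStackReplacement hands back either Smi(-1), meaning the function could not be optimized, or the Smi-tagged AST id at which to enter the optimized code. The Ret/SmiUntag pair above is equivalent to (sketch; 32-bit Smi encoding is value << 1, tag bit 0):

#include <stdint.h>

// Returns -1 to keep running the unoptimized code, else the untagged AST id.
static int32_t TranslateOsrResult(int32_t smi_result) {
  if (smi_result == -2) return -1;  // Smi::FromInt(-1) encodes as -2.
  return smi_result >> 1;           // SmiUntag: AST id for the OSR entry.
}
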
 
 
@@ -1190,19 +1277,19 @@
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
   // a0: actual number of arguments
-  Label non_function;
+  Label slow, non_function;
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(at, sp, at);
   __ lw(a1, MemOperand(at));
-  __ And(at, a1, Operand(kSmiTagMask));
-  __ Branch(&non_function, eq, at, Operand(zero_reg));
+  __ JumpIfSmi(a1, &non_function);
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
 
   // 3a. Patch the first argument if necessary when calling a function.
   // a0: actual number of arguments
   // a1: function
   Label shift_arguments;
+  __ li(t0, Operand(0, RelocInfo::NONE));  // Indicate regular JS_FUNCTION.
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1210,13 +1297,13 @@
     // Do not transform the receiver for strict mode functions.
     __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                  kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
 
     // Do not transform the receiver for native (Compilerhints already in a3).
-    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
 
     // Compute the receiver in non-strict mode.
     // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1238,21 +1325,25 @@
     __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
-    __ push(a0);
+    // Enter an internal frame in order to preserve argument count.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+      __ push(a0);
 
-    __ push(a2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(a2, v0);
+      __ push(a2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(a2, v0);
 
-    __ pop(a0);
-    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
-    __ LeaveInternalFrame();
-    // Restore the function to a1.
+      __ pop(a0);
+      __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+      // Leave internal frame.
+    }
+    // Restore the function to a1, and the flag to t0.
     __ sll(at, a0, kPointerSizeLog2);
     __ addu(at, sp, at);
     __ lw(a1, MemOperand(at));
+    __ li(t0, Operand(0, RelocInfo::NONE));
     __ Branch(&patch_receiver);
 
     // Use the global receiver object from the called function as the
@@ -1273,25 +1364,31 @@
     __ Branch(&shift_arguments);
   }
 
-  // 3b. Patch the first argument when calling a non-function.  The
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ li(t0, Operand(1, RelocInfo::NONE));  // Indicate function proxy.
+  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+  __ bind(&non_function);
+  __ li(t0, Operand(2, RelocInfo::NONE));  // Indicate non-function.
+
+  // 3c. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
   // a0: actual number of arguments
   // a1: function
-  __ bind(&non_function);
-  // Restore the function in case it has been modified.
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(a2, sp, at);
   __ sw(a1, MemOperand(a2, -kPointerSize));
-  // Clear a1 to indicate a non-function being called.
-  __ mov(a1, zero_reg);
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
   //    the original first argument the new receiver.
   // a0: actual number of arguments
   // a1: function
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
@@ -1309,14 +1406,26 @@
     __ Pop();
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
   // a0: actual number of arguments
   // a1: function
-  { Label function;
-    __ Branch(&function, ne, a1, Operand(zero_reg));
-    __ mov(a2, zero_reg);  // expected arguments is 0 for CALL_NON_FUNCTION
-    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+  { Label function, non_proxy;
+    __ Branch(&function, eq, t0, Operand(zero_reg));
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(a2, zero_reg);
     __ SetCallKind(t1, CALL_AS_METHOD);
+    __ Branch(&non_proxy, ne, t0, Operand(1));
+
+    __ push(a1);  // Re-add proxy object as additional argument.
+    __ Addu(a0, a0, Operand(1));
+    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
+    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
     __ bind(&function);
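
The new t0 flag threads a three-way call type from the type checks down to this dispatch: 0 falls through to a direct invocation, 1 re-pushes the proxy as an extra argument and goes to CALL_FUNCTION_PROXY via the arguments adaptor, 2 goes to CALL_NON_FUNCTION with an expected argument count of 0. Summarized as a sketch (names ours, not v8's):

enum CallType { kJSFunction = 0, kFunctionProxy = 1, kNonFunction = 2 };

static const char* DispatchFor(CallType t0) {
  switch (t0) {
    case kJSFunction:    return "InvokeFunction(a1, ...)";
    case kFunctionProxy: return "push a1; a0 += 1; CALL_FUNCTION_PROXY";
    case kNonFunction:   return "CALL_NON_FUNCTION (expected argc a2 == 0)";
  }
  return "unreachable";
}
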
@@ -1350,134 +1459,158 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+    __ push(a0);
+    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+    __ push(a0);
+    // Returns (in v0) the number of arguments to copy to the stack, as a Smi.
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
-  __ push(a0);
-  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
-  __ push(a0);
-  // Returns (in v0) number of arguments to copy to stack as Smi.
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+    // Make a2 the space we have left. The stack might already be overflowed
+    // here which will cause a2 to become negative.
+    __ subu(a2, sp, a2);
+    // Check if the arguments will overflow the stack.
+    __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
+    __ Branch(&okay, gt, a2, Operand(t3));  // Signed comparison.
 
-  // Check the stack for overflow. We are not trying need to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
-  // Make a2 the space we have left. The stack might already be overflowed
-  // here which will cause a2 to become negative.
-  __ subu(a2, sp, a2);
-  // Check if the arguments will overflow the stack.
-  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
-  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
+    // Out of stack space.
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ push(a1);
+    __ push(v0);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
 
-  // Out of stack space.
-  __ lw(a1, MemOperand(fp, kFunctionOffset));
-  __ push(a1);
-  __ push(v0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  // End of stack check.
+    // Push current limit and index.
+    __ bind(&okay);
+    __ push(v0);  // Limit.
+    __ mov(a1, zero_reg);  // Initial index.
+    __ push(a1);
 
-  // Push current limit and index.
-  __ bind(&okay);
-  __ push(v0);  // Limit.
-  __ mov(a1, zero_reg);  // Initial index.
-  __ push(a1);
+    // Get the receiver.
+    __ lw(a0, MemOperand(fp, kRecvOffset));
 
-  // Change context eagerly to get the right global object if necessary.
-  __ lw(a0, MemOperand(fp, kFunctionOffset));
-  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in a0.
-  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
 
-  // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
-  __ lw(a0, MemOperand(fp, kRecvOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in a1.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
 
-  // Do not transform the receiver for strict mode functions.
-  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                               kSmiTagSize)));
-  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                                 kSmiTagSize)));
+    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
-  // Do not transform the receiver for native (Compilerhints already in a2).
-  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+    // Do not transform the receiver for natives (compiler hints already in a2).
+    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
-  // Compute the receiver in non-strict mode.
-  __ And(t0, a0, Operand(kSmiTagMask));
-  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
-  __ LoadRoot(a1, Heap::kNullValueRootIndex);
-  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
-  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(a0, &call_to_object);
+    __ LoadRoot(a1, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a0, Operand(a2));
 
-  // Check if the receiver is already a JavaScript object.
-  // a0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ GetObjectType(a0, a1, a1);
-  __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+    // Check if the receiver is already a JavaScript object.
+    // a0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ GetObjectType(a0, a1, a1);
+    __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Convert the receiver to a regular object.
-  // a0: receiver
-  __ bind(&call_to_object);
-  __ push(a0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
-  __ Branch(&push_receiver);
+    // Convert the receiver to a regular object.
+    // a0: receiver
+    __ bind(&call_to_object);
+    __ push(a0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+    __ Branch(&push_receiver);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
-  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+    __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  // a0: receiver
-  __ bind(&push_receiver);
-  __ push(a0);
+    // Push the receiver.
+    // a0: receiver
+    __ bind(&push_receiver);
+    __ push(a0);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ lw(a0, MemOperand(fp, kIndexOffset));
-  __ Branch(&entry);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ Branch(&entry);
 
-  // Load the current argument from the arguments array and push it to the
-  // stack.
-  // a0: current argument index
-  __ bind(&loop);
-  __ lw(a1, MemOperand(fp, kArgsOffset));
-  __ push(a1);
-  __ push(a0);
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // a0: current argument index
+    __ bind(&loop);
+    __ lw(a1, MemOperand(fp, kArgsOffset));
+    __ push(a1);
+    __ push(a0);
 
-  // Call the runtime to access the property in the arguments array.
-  __ CallRuntime(Runtime::kGetProperty, 2);
-  __ push(v0);
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(v0);
 
-  // Use inline caching to access the arguments.
-  __ lw(a0, MemOperand(fp, kIndexOffset));
-  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
-  __ sw(a0, MemOperand(fp, kIndexOffset));
+    // Use inline caching to access the arguments.
+    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+    __ sw(a0, MemOperand(fp, kIndexOffset));
 
-  // Test if the copy loop has finished copying all the elements from the
-  // arguments object.
-  __ bind(&entry);
-  __ lw(a1, MemOperand(fp, kLimitOffset));
-  __ Branch(&loop, ne, a0, Operand(a1));
-  // Invoke the function.
-  ParameterCount actual(a0);
-  __ sra(a0, a0, kSmiTagSize);
-  __ lw(a1, MemOperand(fp, kFunctionOffset));
-  __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ lw(a1, MemOperand(fp, kLimitOffset));
+    __ Branch(&loop, ne, a0, Operand(a1));
 
-  // Tear down the internal frame and remove function, receiver and args.
-  __ LeaveInternalFrame();
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(a0);
+    __ sra(a0, a0, kSmiTagSize);
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+
+    frame_scope.GenerateLeaveFrame();
+    __ Ret(USE_DELAY_SLOT);
+    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(a1);  // Add function proxy as last argument.
+    __ Addu(a0, a0, Operand(1));
+    __ li(a2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(t1, CALL_AS_METHOD);
+    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    // Tear down the internal frame and remove function, receiver and args.
+  }
+
+  __ Ret(USE_DELAY_SLOT);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
 }
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index c3c3874..92abf6d 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -100,9 +100,9 @@
                         &gc,
                         TAG_OBJECT);
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -190,16 +190,126 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: function.
+  // [sp + kPointerSize]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        v0, a1, a2, &gc, TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ lw(a3, MemOperand(sp, 0));
+
+  // Load the serialized scope info from the stack.
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the object header.
+  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ li(a2, Operand(Smi::FromInt(length)));
+  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+  // If this block context is nested in the global context, we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure, which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(a3, &after_sentinel);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ Assert(eq, message, a3, Operand(zero_reg));
+  }
+  __ lw(a3, GlobalObjectOperand());
+  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
+  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
+  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
+
+  // Copy the global object from the previous context.
+  __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
+
+  // Initialize the rest of the slots to the hole value.
+  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < slots_; i++) {
+    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, v0);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
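
A context is laid out as a FixedArray whose first Context::MIN_CONTEXT_SLOTS entries are the fixed fields written above (closure, previous context, extension/scope info, global); the remaining slots_ entries start out holding the hole. The allocation size is then just (sketch; FixedArray's header is a map word plus a length word, and the MIN_CONTEXT_SLOTS value is illustrative):

static const int kPointerSize = 4;                          // MIPS o32.
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // Map + length.
static const int kMinContextSlots = 4;                      // Illustrative.

static int BlockContextSize(int slots) {
  int length = slots + kMinContextSlots;
  return kFixedArrayHeaderSize + length * kPointerSize;  // FixedArray::SizeFor.
}
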
+
+
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  // a3: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        fail,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    ASSERT((elements_size % kPointerSize) == 0);
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+  }
+}
+
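
Both the JSArray header and its elements come from the single AllocateInNewSpace call, so one limit check covers both objects; the word-copy loop then clones the header but deliberately skips the elements pointer whenever a fresh elements array will be attached. In outline (sketch, not v8 code):

#include <stdint.h>

static void CopyHeaderWords(intptr_t* dst, const intptr_t* src,
                            int size_in_words, int elements_word,
                            bool has_elements) {
  for (int i = 0; i < size_in_words; i++) {
    if (has_elements && i == elements_word) continue;  // Repointed afterwards.
    dst[i] = src[i];
  }
}
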
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
+  //
   // [sp]: constant elements.
   // [sp + kPointerSize]: literal index.
   // [sp + (2 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into r3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -212,14 +322,42 @@
   __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
   __ Branch(&slow_case, eq, a3, Operand(t1));
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
+    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ DropAndRet(3);
+
+    __ bind(&check_fast_elements);
+    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+    __ Branch(&double_elements, ne, v0, Operand(t1));
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    // Return and remove the on-stack parameters.
+    __ DropAndRet(3);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -231,34 +369,7 @@
     __ pop(a3);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  // Return new object in v0.
-  __ AllocateInNewSpace(size,
-                        v0,
-                        a1,
-                        a2,
-                        &slow_case,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ lw(a1, FieldMemOperand(a3, i));
-      __ sw(a1, FieldMemOperand(v0, i));
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ Addu(a2, v0, Operand(JSArray::kSize));
-    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
-  }
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
 
   // Return and remove the on-stack parameters.
   __ Addu(sp, sp, Operand(3 * kPointerSize));
@@ -269,6 +380,51 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: object literal flags.
+  // [sp + kPointerSize]: constant properties.
+  // [sp + (2 * kPointerSize)]: literal index.
+  // [sp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, t0, a3);
+  __ lw(a3, MemOperand(a3));
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case, eq, a3, Operand(t0));
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ lw(a1, FieldMemOperand(a3, i));
+    __ sw(a1, FieldMemOperand(a0, i));
+  }
+
+  // Return and remove the on-stack parameters.
+  __ Drop(4);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
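
The guard above works because a map records its instance size in words, not bytes, so comparing against size >> kPointerSizeLog2 checks that the boilerplate is exactly the statically expected header plus length_ in-object properties; anything else falls back to the runtime. Sketch (header size illustrative):

static const int kPointerSize = 4;
static const int kPointerSizeLog2 = 2;
static const int kJSObjectHeaderSize = 3 * kPointerSize;  // Illustrative.

static bool CanCloneInline(int map_instance_size_in_words, int length) {
  int expected_bytes = kJSObjectHeaderSize + length * kPointerSize;
  return map_instance_size_in_words == (expected_bytes >> kPointerSizeLog2);
}
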
+
+
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -615,7 +771,7 @@
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
-                                                  FPURegister double_dst,
+                                                  DoubleRegister double_dst,
                                                   Register dst1,
                                                   Register dst2,
                                                   Register heap_number_map,
@@ -651,25 +807,16 @@
     // Load the double value.
     __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(single_scratch, double_dst);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_dst,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
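
EmitFPUTruncate bundles the FCSR save / trunc_w_d / FCSR restore dance this hunk deletes: it truncates toward zero and leaves except_flag non-zero when the conversion was inexact or out of range. Behaviorally (a C++ sketch of the check, not of the emitted instruction sequence):

#include <cmath>
#include <stdint.h>

// Returns a non-zero "except_flag" when d has no exact int32 representation.
static int FpuTruncateSketch(double d, int32_t* out) {
  double t = std::trunc(d);  // kRoundToZero; NaN also fails the test below.
  if (t != d || t < -2147483648.0 || t > 2147483647.0) return 1;
  *out = static_cast<int32_t>(t);
  return 0;  // except_flag == 0: exact conversion.
}
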
@@ -706,7 +853,7 @@
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            FPURegister double_scratch,
+                                            DoubleRegister double_scratch,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -735,27 +882,19 @@
     // Load the double value.
     __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(double_scratch, double_scratch);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    FPURegister single_scratch = double_scratch.low();
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
     // Get the result in the destination register.
-    __ mfc1(dst, double_scratch);
+    __ mfc1(dst, single_scratch);
 
   } else {
     // Load the double value in the destination registers.
@@ -881,9 +1020,11 @@
     __ Move(f12, a0, a1);
     __ Move(f14, a2, a3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   4);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number.
   if (!IsMipsSoftFloatABI) {
     CpuFeatures::Scope scope(FPU);
@@ -901,6 +1042,35 @@
 }
 
 
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+  // These variants are compiled ahead of time.  See next method.
+  if (the_int_.is(a1) &&
+      the_heap_number_.is(v0) &&
+      scratch_.is(a2) &&
+      sign_.is(a3)) {
+    return true;
+  }
+  if (the_int_.is(a2) &&
+      the_heap_number_.is(v0) &&
+      scratch_.is(a3) &&
+      sign_.is(a0)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
+  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
+  stub1.GetCode()->set_is_pregenerated(true);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
 // See comment for class, this does NOT work for int32's that are in Smi range.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1068,8 +1238,7 @@
          (lhs.is(a1) && rhs.is(a0)));
 
   Label lhs_is_smi;
-  __ And(t0, lhs, Operand(kSmiTagMask));
-  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(lhs, &lhs_is_smi);
   // Rhs is a Smi.
   // Check whether the non-smi is a heap number.
   __ GetObjectType(lhs, t4, t4);
@@ -1258,7 +1427,7 @@
 
   if (!CpuFeatures::IsSupported(FPU)) {
     __ push(ra);
-    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    __ PrepareCallCFunction(0, 2, t4);
     if (!IsMipsSoftFloatABI) {
       // We are not using MIPS FPU instructions, and parameters for the runtime
       // function call are prepared in a0-a3 registers, but the function we are
@@ -1268,19 +1437,17 @@
       __ Move(f12, a0, a1);
       __ Move(f14, a2, a3);
     }
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+       0, 2);
     __ pop(ra);  // Because this function returns int, result is in v0.
     __ Ret();
   } else {
     CpuFeatures::Scope scope(FPU);
     Label equal, less_than;
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&equal);
-    __ nop();
-
-    __ c(OLT, D, f12, f14);
-    __ bc1t(&less_than);
-    __ nop();
+    __ BranchF(&equal, NULL, eq, f12, f14);
+    __ BranchF(&less_than, NULL, lt, f12, f14);
 
     // Not equal, not less, not NaN, must be greater.
     __ li(v0, Operand(GREATER));
@@ -1303,7 +1470,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into a2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1473,9 +1640,7 @@
       __ JumpIfSmi(probe, not_found);
       __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
       __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-      __ c(EQ, D, f12, f14);
-      __ bc1t(&load_result_from_cache);
-      __ nop();   // bc1t() requires explicit fill of branch delay slot.
+      __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
       __ Branch(not_found);
     } else {
       // Note that there is no cache check for non-FPU case, even though
@@ -1591,9 +1756,7 @@
     __ li(t2, Operand(EQUAL));
 
     // Check if either rhs or lhs is NaN.
-    __ c(UN, D, f12, f14);
-    __ bc1t(&nan);
-    __ nop();
+    __ BranchF(NULL, &nan, eq, f12, f14);
 
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
@@ -1711,88 +1874,144 @@
 }
 
 
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses FPU instructions.
   CpuFeatures::Scope scope(FPU);
 
-  Label false_result;
-  Label not_heap_number;
-  Register scratch0 = t5.is(tos_) ? t3 : t5;
+  Label patch;
+  const Register map = t5.is(tos_) ? t3 : t5;
 
-  // undefined -> false
-  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  // undefined -> false.
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // Boolean -> its value
-  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
-  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
-  // return true if the equal condition is satisfied.
-  __ Ret(eq, tos_, Operand(scratch0));
+  // Boolean -> its value.
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
 
-  // Smis: 0 -> false, all other -> true
-  __ And(scratch0, tos_, tos_);
-  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
-  __ And(scratch0, tos_, Operand(kSmiTagMask));
-  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
-  // return true if the not equal condition is satisfied.
-  __ Ret(eq, scratch0, Operand(zero_reg));
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
 
-  // 'null' -> false
-  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all other -> true
+    __ And(at, tos_, kSmiTagMask);
+    // tos_ contains the correct return value already.
+    __ Ret(eq, at, Operand(zero_reg));
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(tos_, &patch);
+  }
 
-  // HeapNumber => false if +0, -0, or NaN.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+  if (types_.NeedsMap()) {
+    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
 
-  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
-  __ fcmp(f12, 0.0, UEQ);
+    if (types_.CanBeUndetectable()) {
+      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+      __ And(at, at, Operand(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      __ movn(tos_, zero_reg, at);
+      __ Ret(ne, at, Operand(zero_reg));
+    }
+  }
 
-  // "tos_" is a register, and contains a non zero value by default.
-  // Hence we only need to overwrite "tos_" with zero to return false for
-  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ movt(tos_, zero_reg);
-  __ Ret();
+  if (types_.Contains(SPEC_OBJECT)) {
+    // Spec object -> true.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    // tos_ contains the correct non-zero return value already.
+    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+  }
 
-  __ bind(&not_heap_number);
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    Label skip;
+    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+    __ Ret();  // The string length is OK as the return value.
+    __ bind(&skip);
+  }
 
-  // It can be an undetectable object.
-  // Undetectable => false.
-  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
-  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+  if (types_.Contains(HEAP_NUMBER)) {
+    // Heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number;
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&not_heap_number, ne, map, Operand(at));
+    Label zero_or_nan, number;
+    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+    // "tos_" is a register, and contains a non zero value by default.
+    // Hence we only need to overwrite "tos_" with zero to return false for
+    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+    __ bind(&zero_or_nan);
+    __ mov(tos_, zero_reg);
+    __ bind(&number);
+    __ Ret();
+    __ bind(&not_heap_number);
+  }
 
-  // JavaScript object => true.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
 
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Check for string.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value tos_.
+    __ LoadRoot(at, value);
+    __ Subu(at, at, tos_);  // at == 0 iff tos_ is the root (for movz/Ret).
+    // The value of a root is never NULL, so we can avoid loading a non-null
+    // value into tos_ when we want to return 'true'.
+    if (!result) {
+      __ movz(tos_, zero_reg, at);
+    }
+    __ Ret(eq, at, Operand(zero_reg));
+  }
+}
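
CheckOddball stays branch-free by computing at = root - tos_ and relying on movz, which copies zero_reg into tos_ only when at is zero, i.e. only when tos_ is the expected oddball and the answer should be false. In C terms (sketch):

#include <stdint.h>

// movz(rd, rs, rt) semantics: if (rt == 0) rd = rs.
static int32_t CheckOddballSketch(int32_t tos, int32_t root, bool result) {
  int32_t at = root - tos;           // Zero iff tos is the expected oddball.
  if (!result && at == 0) tos = 0;   // movz(tos_, zero_reg, at).
  return tos;                        // A non-zero return value means "true".
}
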
 
-  // String value => false iff empty, i.e., length is zero.
-  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-  // If length is zero, "tos_" contains zero ==> false.
-  // If length is not zero, "tos_" contains a non-zero value ==> true.
-  __ Ret();
 
-  // Return 0 in "tos_" for false.
-  __ bind(&false_result);
-  __ mov(tos_, zero_reg);
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ Move(a3, tos_);
+  __ li(a2, Operand(Smi::FromInt(tos_.code())));
+  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+  __ Push(a3, a2, a1);
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow, so there is no need
+  // to store the registers in any particular way, but we do have to store
+  // and restore them.
+  __ MultiPush(kJSCallerSaved | ra.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(FPU);
+    __ MultiPushFPU(kCallerSavedFPU);
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = a1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ li(a0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(FPU);
+    __ MultiPopFPU(kCallerSavedFPU);
+  }
+
+  __ MultiPop(kJSCallerSaved | ra.bit());
   __ Ret();
 }
 
@@ -1951,12 +2170,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(a0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(a1, v0);
-    __ pop(a0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(a0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(a1, v0);
+      __ pop(a0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -1998,13 +2218,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(v0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(a2, v0);  // Move the new heap number into a2.
-    // Get the heap number into v0, now that the new heap number is in a2.
-    __ pop(v0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(v0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(a2, v0);  // Move the new heap number into a2.
+      // Get the heap number into v0, now that the new heap number is in a2.
+      __ pop(v0);
+    }
 
     // Convert the heap number in v0 to an untagged integer in a1.
     // This can't go slow-case because it's the same number we already
@@ -2115,6 +2336,9 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -2717,26 +2941,16 @@
           // Otherwise return a heap number if allowed, or jump to type
           // transition.
 
-          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-          // On MIPS a lot of things cannot be implemented the same way so right
-          // now it makes a lot more sense to just do things manually.
-
-          // Save FCSR.
-          __ cfc1(scratch1, FCSR);
-          // Disable FPU exceptions.
-          __ ctc1(zero_reg, FCSR);
-          __ trunc_w_d(single_scratch, f10);
-          // Retrieve FCSR.
-          __ cfc1(scratch2, FCSR);
-          // Restore FCSR.
-          __ ctc1(scratch1, FCSR);
-
-          // Check for inexact conversion or exception.
-          __ And(scratch2, scratch2, kFCSRFlagMask);
+          Register except_flag = scratch2;
+          __ EmitFPUTruncate(kRoundToZero,
+                             single_scratch,
+                             f10,
+                             scratch1,
+                             except_flag);
 
           if (result_type_ <= BinaryOpIC::INT32) {
-            // If scratch2 != 0, result does not fit in a 32-bit integer.
-            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+            // If except_flag != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, except_flag, Operand(zero_reg));
           }
 
           // Check if the result fits in a smi.
@@ -2929,9 +3143,9 @@
         __ Ret();
       } else {
         // Tail call that writes the int32 in a2 to the heap number in v0, using
-        // a3 and a1 as scratch. v0 is preserved and returned.
+        // a3 and a0 as scratch. v0 is preserved and returned.
         __ mov(a0, t1);
-        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
         __ TailCallStub(&stub);
       }
 
@@ -3225,10 +3439,12 @@
     __ lw(t0, MemOperand(cache_entry, 0));
     __ lw(t1, MemOperand(cache_entry, 4));
     __ lw(t2, MemOperand(cache_entry, 8));
-    __ Addu(cache_entry, cache_entry, 12);
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
+    Counters* counters = masm->isolate()->counters();
+    __ IncrementCounter(
+        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into v0.
       __ Drop(1);
@@ -3241,6 +3457,9 @@
   }  // if (CpuFeatures::IsSupported(FPU))
 
   __ bind(&calculate);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(
+      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
@@ -3259,13 +3478,13 @@
     // Register a0 holds precalculated cache entry address; preserve
     // it on the stack and pop it into register cache_entry after the
     // call.
-    __ push(cache_entry);
+    __ Push(cache_entry, a2, a3);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(f4);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(cache_entry, a2, a3);
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3283,10 +3502,11 @@
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
     __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(a0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(a0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3299,14 +3519,15 @@
 
     // We return the value in f4 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ li(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ li(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
@@ -3317,22 +3538,31 @@
   __ push(ra);
   __ PrepareCallCFunction(2, scratch);
   if (IsMipsSoftFloatABI) {
-    __ Move(v0, v1, f4);
+    __ Move(a0, a1, f4);
   } else {
     __ mov_d(f12, f4);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
+  Isolate* isolate = masm->isolate();
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+          ExternalReference::math_sin_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+          ExternalReference::math_cos_double_function(isolate),
+          0, 1);
+      break;
+    case TranscendentalCache::TAN:
+      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(masm->isolate()), 2);
+          ExternalReference::math_log_double_function(isolate),
+          0, 1);
       break;
     default:
       UNIMPLEMENTED();
@@ -3347,6 +3577,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -3415,12 +3646,15 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(3, scratch);
+    __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()), 3);
-    __ pop(ra);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
+      __ pop(ra);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(v0, heapnumber);
@@ -3443,15 +3677,20 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(4, scratch);
+    __ PrepareCallCFunction(0, 2, scratch);
     // ABI (o32) for func(double a, double b): a in f12, b in f14.
     ASSERT(double_base.is(f12));
     ASSERT(double_exponent.is(f14));
     __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()), 4);
-    __ pop(ra);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0,
+          2);
+      __ pop(ra);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(v0, heapnumber);
@@ -3468,6 +3707,37 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
+}
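+// Hedged summary of the ahead-of-time generation above: stubs that GC and
+// write-barrier paths depend on are compiled eagerly and tagged as
+// pregenerated, so a later GetCode() hits the cache instead of compiling
+// (compilation could itself allocate and trigger GC).  Minimal usage
+// sketch, using only calls that appear in this patch:
+//
+//   CEntryStub stub(1, kDontSaveFPRegs);
+//   stub.GetCode()->set_is_pregenerated(true);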
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(v0);
 }
@@ -3490,16 +3760,17 @@
   // s1: pointer to the first argument          (C callee-saved)
   // s2: pointer to builtin function            (C callee-saved)
 
+  Isolate* isolate = masm->isolate();
+
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, a1);
-    __ CallCFunction(
-        ExternalReference::perform_gc_function(masm->isolate()), 1);
+    __ PrepareCallCFunction(1, 0, a1);
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
   }
 
   ExternalReference scope_depth =
-      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+      ExternalReference::heap_always_allocate_scope_depth(isolate);
   if (always_allocate) {
     __ li(a0, Operand(scope_depth));
     __ lw(a1, MemOperand(a0));
@@ -3588,18 +3859,16 @@
             v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
 
   // Retrieve the pending exception and clear the variable.
-  __ li(t0,
-        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
-  __ lw(a3, MemOperand(t0));
+  __ li(a3, Operand(isolate->factory()->the_hole_value()));
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ lw(v0, MemOperand(t0));
   __ sw(a3, MemOperand(t0));
 
   // Special handling of termination exceptions which are uncatchable
   // by javascript code.
   __ Branch(throw_termination_exception, eq,
-            v0, Operand(masm->isolate()->factory()->termination_exception()));
+            v0, Operand(isolate->factory()->termination_exception()));
 
   // Handle normal exception.
   __ jmp(throw_normal_exception);
@@ -3628,6 +3897,7 @@
   __ Subu(s1, s1, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
   // Setup argc and the builtin function in callee-saved registers.
@@ -3680,7 +3950,8 @@
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
+  Isolate* isolate = masm->isolate();
 
   // Registers:
   // a0: entry address
@@ -3699,8 +3970,11 @@
     CpuFeatures::Scope scope(FPU);
     // Save callee-saved FPU registers.
     __ MultiPushFPU(kCalleeSavedFPU);
+    // Set up the reserved register for 0.0.
+    __ Move(kDoubleRegZero, 0.0);
   }
 
   // Load argv in s0 register.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(FPU)) {
@@ -3715,7 +3989,7 @@
   __ li(t2, Operand(Smi::FromInt(marker)));
   __ li(t1, Operand(Smi::FromInt(marker)));
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ lw(t0, MemOperand(t0));
   __ Push(t3, t2, t1, t0);
   // Setup frame pointer for the frame to be pushed.
@@ -3739,8 +4013,7 @@
 
   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
-  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
-                                masm->isolate());
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
   __ li(t1, Operand(ExternalReference(js_entry_sp)));
   __ lw(t2, MemOperand(t1));
   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -3754,35 +4027,35 @@
   __ bind(&cont);
   __ push(t0);
 
-  // Call a faked try-block that does the invoke.
-  __ bal(&invoke);  // bal exposes branch delay slot.
-  __ nop();   // Branch delay slot nop.
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
-  // Coming in here the fp will be invalid because the PushTryHandler below
-  // sets it to 0 to signal the existence of the JSEntry frame.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushTryHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
   __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);  // b exposes branch delay slot.
   __ nop();   // Branch delay slot nop.
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
   // If an exception not caught by another handler occurs, this handler
-  // returns control to the code after the bal(&invoke) above, which
+  // returns control to the code at &handler_entry above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ li(t0,
-        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
-  __ lw(t1, MemOperand(t0));
+  __ li(t1, Operand(isolate->factory()->the_hole_value()));
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ sw(t1, MemOperand(t0));
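+  // Sketch (assumption): PushTryHandler now takes a handler index (0 here,
+  // the only handler in this code object), and handler_offset_ records
+  // where &handler_entry landed, so the unwinder can resolve the handler
+  // roughly as
+  //
+  //   Address handler = code->instruction_start() + handler_offset;
+  //
+  // where the handler_offset accessor name is illustrative, not from this
+  // patch.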
 
   // Invoke the function by calling through JS entry trampoline builtin.
@@ -3805,7 +4078,7 @@
 
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
-                                      masm->isolate());
+                                      isolate);
     __ li(t0, Operand(construct_entry));
   } else {
     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
@@ -3833,7 +4106,7 @@
   // Restore the top frame descriptors from the stack.
   __ pop(t1);
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ sw(t1, MemOperand(t0));
 
   // Reset the stack to the callee saved registers.
@@ -3857,11 +4130,10 @@
 // * object: a0 or at sp + 1 * kPointerSize.
 // * function: a1 or at sp.
 //
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // This is a crankshaft-specific feature that has not been implemented yet.
-  ASSERT(!HasCallSiteInlineCheck());
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
@@ -3875,6 +4147,8 @@
   const Register inline_site = t5;
   const Register scratch = a2;
 
+  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
+
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -3890,10 +4164,10 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
-    __ Branch(&miss, ne, function, Operand(t1));
-    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
-    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(at));
+    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(at));
     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3901,7 +4175,7 @@
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -3913,7 +4187,16 @@
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    ASSERT(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in t0 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    __ LoadFromSafepointRegisterSlot(scratch, t0);
+    __ Subu(inline_site, ra, scratch);
+    // Get the map location in scratch and patch it.
+    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
+    __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   }
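+  // Hedged sketch of the patching arithmetic used above and in the
+  // true/false cases below: the inlined call site precedes the call into
+  // this stub, so its address is the return address minus the delta the
+  // caller stored in t0's safepoint slot:
+  //
+  //   inline_site = ra - saved_delta;
+  //   bool_load   = inline_site + kDeltaToLoadBoolResult;  // 5 words
+  //
+  // GetRelocatedValue and PatchRelocatedValue then rewrite the embedded
+  // map cell and boolean constant in place.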
 
   // Register mapping: a3 is object map and t0 is function prototype.
@@ -3939,7 +4222,16 @@
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return true.
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      ASSERT_EQ(Smi::FromInt(0), 0);
+      __ mov(v0, zero_reg);
+    }
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3948,8 +4240,17 @@
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return false.
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      __ li(v0, Operand(Smi::FromInt(1)));
+    }
   }
+
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   Label object_not_null, object_not_null_or_smi;
@@ -3986,10 +4287,11 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(a0, a1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a0, a1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ mov(a0, v0);
     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4411,10 +4713,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  sp[0]: last_match_info (expected JSArray)
@@ -4427,6 +4725,8 @@
   static const int kSubjectOffset = 2 * kPointerSize;
   static const int kJSRegExpOffset = 3 * kPointerSize;
 
+  Isolate* isolate = masm->isolate();
+
   Label runtime, invoke_regexp;
 
   // Allocation of registers for this function. These are in callee save
@@ -4442,9 +4742,9 @@
   // Ensure that a RegExp stack is allocated.
   ExternalReference address_of_regexp_stack_memory_address =
       ExternalReference::address_of_regexp_stack_memory_address(
-          masm->isolate());
+          isolate);
   ExternalReference address_of_regexp_stack_memory_size =
-      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+      ExternalReference::address_of_regexp_stack_memory_size(isolate);
   __ li(a0, Operand(address_of_regexp_stack_memory_size));
   __ lw(a0, MemOperand(a0, 0));
   __ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -4508,8 +4808,7 @@
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
   __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
-  __ And(at, a0, Operand(kSmiTagMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ JumpIfNotSmi(a0, &runtime);
   __ Branch(&runtime, ls, a3, Operand(a0));
 
   // a2: Number of capture registers
@@ -4525,7 +4824,7 @@
          FieldMemOperand(a0, JSArray::kElementsOffset));
   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ Branch(&runtime, ne, a0, Operand(
-      masm->isolate()->factory()->fixed_array_map()));
+      isolate->factory()->fixed_array_map()));
   // Check that the last match info has space for the capture registers and the
   // additional information.
   __ lw(a0,
@@ -4542,25 +4841,38 @@
   Label seq_string;
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  // First check for flat string.
-  __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  // First check for flat string.  None of the following string type tests will
+  // succeed if subject is not a string or a short external string.
+  __ And(a1,
+         a0,
+         Operand(kIsNotStringMask |
+                 kStringRepresentationMask |
+                 kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ Branch(&seq_string, eq, a1, Operand(zero_reg));
 
   // subject: Subject string
-  // a0: instance type if Subject string
+  // a0: instance type of Subject string
   // regexp_data: RegExp data (FixedArray)
+  // a1: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
-  __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
+  __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
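+  // Summary of the dispatch above (tag ordering per the STATIC_ASSERTs):
+  //   masked == 0                  -> sequential, branch to seq_string
+  //   masked <  kExternalStringTag -> cons string
+  //   masked == kExternalStringTag -> external string, handled out of line
+  //   not-a-string or short-external bit set -> runtime
+  //   otherwise                    -> sliced string, falls through below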
 
   // String is sliced.
   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4580,7 +4892,7 @@
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ And(at, a0, Operand(kStringRepresentationMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
 
   __ bind(&seq_string);
   // subject: Subject string
@@ -4616,7 +4928,7 @@
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // All checks done. Now push arguments for native regexp code.
-  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
                       1, a0, a2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
@@ -4656,13 +4968,12 @@
 
   // Argument 5: static offsets vector buffer.
   __ li(a0, Operand(
-        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+        ExternalReference::address_of_static_offsets_vector(isolate)));
   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
 
   // For arguments 4 and 3 get string length, calculate start of string data
   // and calculate the shift of the index (0 for ASCII and 1 for two byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4715,11 +5026,9 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ li(a1, Operand(
-      ExternalReference::the_hole_value_location(masm->isolate())));
-  __ lw(a1, MemOperand(a1, 0));
+  __ li(a1, Operand(isolate->factory()->the_hole_value()));
   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      masm->isolate())));
+                                      isolate)));
   __ lw(v0, MemOperand(a2, 0));
   __ Branch(&runtime, eq, v0, Operand(a1));
 
@@ -4737,7 +5046,7 @@
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+  __ li(v0, Operand(isolate->factory()->null_value()));
   __ Addu(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -4757,20 +5066,29 @@
   __ sw(a2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
   __ sw(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+  __ mov(a2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      a2,
+                      t3,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ sw(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ mov(a3, last_match_info_elements);
-  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      t3,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
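+  // Hedged reading of the new write-barrier helper used above, with the
+  // parameter roles inferred from the call sites in this patch:
+  //
+  //   RecordWriteField(object,   // holder that was written into
+  //                    offset,   // field offset inside the holder
+  //                    value,    // pointer that was stored (clobbered)
+  //                    scratch,  // scratch register (clobbered)
+  //                    kRAHasNotBeenSaved,  // ra must survive the stub
+  //                    kDontSaveFPRegs);    // FPU state need not be saved
+  //
+  // Unlike the old RecordWrite(base, Operand(offset), ...) form, it also
+  // cooperates with incremental marking, not just the remembered set.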
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
-      ExternalReference::address_of_static_offsets_vector(masm->isolate());
+      ExternalReference::address_of_static_offsets_vector(isolate);
   __ li(a2, Operand(address_of_static_offsets_vector));
 
   // a1: number of capture registers
@@ -4800,6 +5118,29 @@
   __ Addu(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
+  // External string.  Short external strings have already been ruled out.
+  // a0: scratch
+  __ bind(&external_string);
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, a0, Operand(kIsIndirectStringMask));
+    __ Assert(eq,
+              "external string expected, but not found",
+              at,
+              Operand(zero_reg));
+  }
+  __ lw(subject,
+        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Subu(subject,
+          subject,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&seq_string);
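+  // The Subu above is a deliberate aliasing trick: once the sequential
+  // header size is subtracted, the external resource data can be indexed
+  // exactly like a sequential string's payload, i.e. a later
+  //
+  //   load(subject + SeqString::kHeaderSize - kHeapObjectTag + index)
+  //
+  // reads resource_data[index].  The STATIC_ASSERT guarantees the ASCII
+  // and two-byte header sizes agree, so one adjustment covers both
+  // encodings.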
+
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4895,8 +5236,25 @@
 }
 
 
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+  code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
+  // a1 : the function to call
+  Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
@@ -4910,19 +5268,15 @@
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Branch(&call, ne, t0, Operand(at));
     // Patch the receiver on the stack with the global receiver object.
-    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
-    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+    __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
-  // Get the function to call from the stack.
-  // function, receiver [, arguments]
-  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
   // Check that the function is really a JavaScript function.
   // a1: pushed function (to be verified)
-  __ JumpIfSmi(a1, &slow);
+  __ JumpIfSmi(a1, &non_function);
   // Get the map of the function object.
   __ GetObjectType(a1, a2, a2);
   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -4950,8 +5304,22 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // Check for function proxy.
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ push(a1);  // Put proxy as additional argument.
+  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
+  __ li(a2, Operand(0, RelocInfo::NONE));
+  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  {
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
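+  // Register protocol for the proxy path above, restated from the code:
+  //   a0: argc_ + 1  (the proxy was pushed as an extra argument)
+  //   a1: the proxy itself
+  //   a2: 0          (expected arguments; the adaptor fixes any mismatch)
+  //   a3: entry of the CALL_FUNCTION_PROXY builtin
+  //   t1: call kind, CALL_AS_METHOD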
+
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
+  __ bind(&non_function);
   __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
   __ li(a0, Operand(argc_));  // Setup the number of arguments.
   __ mov(a2, zero_reg);
@@ -5008,7 +5376,6 @@
   Label got_char_code;
   Label sliced_string;
 
-  ASSERT(!t0.is(scratch_));
   ASSERT(!t0.is(index_));
   ASSERT(!t0.is(result_));
   ASSERT(!t0.is(object_));
@@ -5026,102 +5393,41 @@
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
 
-  // Put smi-tagged index into scratch register.
-  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
-  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t0, result_, Operand(kStringRepresentationMask));
-  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+  __ sra(index_, index_, kSmiTagSize);
 
-  // Handle non-flat strings.
-  __ And(result_, result_, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
-  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
+  StringCharLoadGenerator::Generate(masm,
+                                    object_,
+                                    index_,
+                                    result_,
+                                    &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
-  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
-  __ Branch(&call_runtime_, ne, result_, Operand(t0));
-
-  // Get the first of the two strings and load its instance type.
-  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
-  __ addu(scratch_, scratch_, result_);
-  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  __ bind(&assure_seq_string);
-  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // Check that parent is not an external string. Go to runtime otherwise.
-  STATIC_ASSERT(kSeqStringTag == 0);
-
-  __ And(t0, result_, Operand(kStringRepresentationMask));
-  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ And(t0, result_, Operand(kStringEncodingMask));
-  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register. We can
-  // add without shifting since the smi tag size is the log2 of the
-  // number of bytes in a two-byte character.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
-  __ Addu(scratch_, object_, Operand(scratch_));
-  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
-  __ Branch(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-
-  __ srl(t0, scratch_, kSmiTagSize);
-  __ Addu(scratch_, object_, t0);
-
-  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
-  __ bind(&got_char_code);
   __ sll(result_, result_, kSmiTagSize);
   __ bind(&exit_);
 }
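+// Hedged sketch of the shared helper now used above, with its contract
+// inferred from this call site only:
+//
+//   StringCharLoadGenerator::Generate(masm, object, index, result, bailout);
+//
+// object: the string; cons/sliced indirections are unwrapped internally.
+// index:  an untagged integer, hence the sra by kSmiTagSize beforehand.
+// result: receives the raw character code; hard cases jump to bailout.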
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
   __ CheckMap(index_,
-              scratch_,
+              result_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   // Consumed by runtime conversion function:
-  __ Push(object_, index_, index_);
+  __ Push(object_, index_);
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
@@ -5133,16 +5439,14 @@
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
 
-  __ Move(scratch_, v0);
-
-  __ pop(index_);
+  __ Move(index_, v0);
   __ pop(object_);
   // Reload the instance type.
   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ Branch(&got_smi_index_);
 
@@ -5151,6 +5455,7 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
+  __ sll(index_, index_, kSmiTagSize);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
 
@@ -5194,7 +5499,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5220,76 +5526,13 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
-class StringHelper : public AllStatic {
- public:
-  // Generate code for copying characters using a simple loop. This should only
-  // be used in places where the number of characters is small and the
-  // additional setup and checking in GenerateCopyCharactersLong adds too much
-  // overhead. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     bool ascii);
-
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharactersLong(MacroAssembler* masm,
-                                         Register dest,
-                                         Register src,
-                                         Register count,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         Register scratch5,
-                                         int flags);
-
-
-  // Probe the symbol table for a two character string. If the string is
-  // not found by probing a jump to the label not_found is performed. This jump
-  // does not guarantee that the string is not in the symbol table. If the
-  // string is found the code falls through with the string in register r0.
-  // Contents of both c1 and c2 registers are modified. At the exit c1 is
-  // guaranteed to contain halfword with low and high bytes equal to
-  // initial contents of c1 and c2 respectively.
-  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                   Register c1,
-                                                   Register c2,
-                                                   Register scratch1,
-                                                   Register scratch2,
-                                                   Register scratch3,
-                                                   Register scratch4,
-                                                   Register scratch5,
-                                                   Label* not_found);
-
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                           Register dest,
                                           Register src,
@@ -5540,10 +5783,10 @@
     __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
 
     __ Branch(not_found, eq, undefined, Operand(candidate));
-    // Must be null (deleted entry).
+    // Must be the hole (deleted entry).
     if (FLAG_debug_code) {
-      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
-      __ Assert(eq, "oddball in symbol table is not undefined or null",
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      __ Assert(eq, "oddball in symbol table is not undefined or the hole",
           scratch, Operand(candidate));
     }
     __ jmp(&next_probe[i]);
@@ -5577,47 +5820,47 @@
 
 
 void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character) {
-  // hash = seed + character + ((seed + character) << 10);
-  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
-  // Untag smi seed and add the character.
-  __ SmiUntag(hash);
+                                    Register hash,
+                                    Register character) {
+  // hash = character + (character << 10);
+  __ sll(hash, character, 10);
   __ addu(hash, hash, character);
-  __ sll(at, hash, 10);
-  __ addu(hash, hash, at);
   // hash ^= hash >> 6;
-  __ sra(at, hash, 6);
+  __ srl(at, hash, 6);
   __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character) {
+                                            Register hash,
+                                            Register character) {
   // hash += character;
   __ addu(hash, hash, character);
   // hash += hash << 10;
   __ sll(at, hash, 10);
   __ addu(hash, hash, at);
   // hash ^= hash >> 6;
-  __ sra(at, hash, 6);
+  __ srl(at, hash, 6);
   __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash) {
+                                       Register hash) {
   // hash += hash << 3;
   __ sll(at, hash, 3);
   __ addu(hash, hash, at);
   // hash ^= hash >> 11;
-  __ sra(at, hash, 11);
+  __ srl(at, hash, 11);
   __ xor_(hash, hash, at);
   // hash += hash << 15;
   __ sll(at, hash, 15);
   __ addu(hash, hash, at);
 
+  uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+  __ li(at, Operand(kHashShiftCutOffMask));
+  __ and_(hash, hash, at);
+
   // if (hash == 0) hash = 27;
   __ ori(at, zero_reg, 27);
   __ movz(hash, at, hash);
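+  // The two instructions above implement "if (hash == 0) hash = 27"
+  // branch-free: ori materializes 27 in at, and movz(hash, at, hash)
+  // copies at into hash only when the condition register (hash) is zero.
+  // The mask applied earlier keeps only the bits that survive
+  // String::kHashShift, so a stored hash of zero can occur and must be
+  // remapped.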
@@ -5854,15 +6097,13 @@
     // a3: from index (untagged smi)
     // t2 (a.k.a. to): to (smi)
     // t3 (a.k.a. from): from offset (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ And(t4, a1, Operand(kStringRepresentationMask));
-    __ Branch(&seq_string, eq, t4, Operand(zero_reg));
+    Label allocate_slice, sliced_string, seq_or_external_string;
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ And(t4, a1, Operand(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
+    // Not indirect: the string is sequential or external.
-    __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
+    __ Branch(&seq_or_external_string, eq, t4, Operand(zero_reg));
 
     __ And(t4, a1, Operand(kSlicedNotConsMask));
     __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
@@ -5880,8 +6121,8 @@
     __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
     __ jmp(&allocate_slice);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string.  Just move string to the correct register.
     __ mov(t1, v0);
 
     __ bind(&allocate_slice);
@@ -6467,39 +6708,25 @@
     __ Subu(a2, a0, Operand(kHeapObjectTag));
     __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
 
-    Label fpu_eq, fpu_lt, fpu_gt;
-    // Compare operands (test if unordered).
-    __ c(UN, D, f0, f2);
-    // Don't base result on status bits when a NaN is involved.
-    __ bc1t(&unordered);
-    __ nop();
+    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+    Label fpu_eq, fpu_lt;
+    // Test if equal, and also handle the unordered/NaN case.
+    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
 
-    // Test if equal.
-    __ c(EQ, D, f0, f2);
-    __ bc1t(&fpu_eq);
-    __ nop();
+    // Test if less (unordered case is already handled).
+    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
 
-    // Test if unordered or less (unordered case is already handled).
-    __ c(ULT, D, f0, f2);
-    __ bc1t(&fpu_lt);
-    __ nop();
+    // Otherwise it's greater, so just fall thru, and return.
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(GREATER));  // In delay slot.
 
-    // Otherwise it's greater.
-    __ bc1f(&fpu_gt);
-    __ nop();
-
-    // Return a result of -1, 0, or 1.
     __ bind(&fpu_eq);
-    __ li(v0, Operand(EQUAL));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(EQUAL));  // In delay slot.
 
     __ bind(&fpu_lt);
-    __ li(v0, Operand(LESS));
-    __ Ret();
-
-    __ bind(&fpu_gt);
-    __ li(v0, Operand(GREATER));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(LESS));  // In delay slot.
 
     __ bind(&unordered);
   }
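+  // Hedged reading of the BranchF macro adopted above, with semantics
+  // inferred from these call sites: BranchF(target, nan, cond, src1, src2)
+  // folds the old c()/bc1t()/nop sequences into a single macro that
+  // branches to target when the ordered condition holds and to nan when
+  // either input is NaN; passing NULL for nan asserts that the unordered
+  // case was already dispatched.  Ret(USE_DELAY_SLOT) then schedules the
+  // following li into the return's branch delay slot, saving one
+  // instruction per exit path.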
@@ -6650,12 +6877,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                              masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(a1, a0);
-  __ li(t0, Operand(Smi::FromInt(op_)));
-  __ push(t0);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a0);
+    __ li(t0, Operand(Smi::FromInt(op_)));
+    __ push(t0);
+    __ CallExternalReference(miss, 3);
+  }
   // Compute the entry point of the rewritten stub.
   __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
@@ -6672,7 +6900,7 @@
   // The saved ra is after the reserved stack space for the 4 args.
   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
 
-  if (FLAG_debug_code && EnableSlowAsserts()) {
+  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
     // In case of an error the return address may point to a memory area
     // filled with kZapValue by the GC.
     // Dereference the address and check for this.
@@ -6722,15 +6950,14 @@
 }
 
 
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register receiver,
-    Register properties,
-    String* name,
-    Register scratch0) {
-// If names of slots in range from 1 to kProbes - 1 for the hash value are
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register receiver,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
@@ -6743,20 +6970,17 @@
     __ lw(index, FieldMemOperand(properties, kCapacityOffset));
     __ Subu(index, index, Operand(1));
     __ And(index, index, Operand(
-         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    // index *= 3.
-    __ mov(at, index);
-    __ sll(index, index, 1);
+    __ sll(at, index, 1);
     __ Addu(index, index, at);
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
     ASSERT_EQ(kSmiTagSize, 1);
     Register tmp = properties;
-
     __ sll(scratch0, index, 1);
     __ Addu(tmp, properties, scratch0);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -6784,19 +7008,18 @@
 
   const int spill_mask =
       (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
-       a2.bit() | a1.bit() | a0.bit());
+       a2.bit() | a1.bit() | a0.bit() | v0.bit());
 
   __ MultiPush(spill_mask);
   __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ li(a1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
+  __ CallStub(&stub);
+  __ mov(at, v0);
   __ MultiPop(spill_mask);
 
-  __ Branch(done, eq, v0, Operand(zero_reg));
-  __ Branch(miss, ne, v0, Operand(zero_reg));
-  return result;
+  __ Branch(done, eq, at, Operand(zero_reg));
+  __ Branch(miss, ne, at, Operand(zero_reg));
 }
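+// Note on the v0/at staging above: v0 now belongs to the spill mask, so
+// the stub's return value would be clobbered by MultiPop.  Copying it to
+// at, which MultiPush/MultiPop never touch, preserves it across the
+// restore:
+//
+//   __ CallStub(&stub);
+//   __ mov(at, v0);           // save result before registers are restored
+//   __ MultiPop(spill_mask);  // restores v0 among others
+//   __ Branch(done, eq, at, Operand(zero_reg));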
 
 
@@ -6811,6 +7034,11 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
+  ASSERT(!elements.is(scratch1));
+  ASSERT(!elements.is(scratch2));
+  ASSERT(!name.is(scratch1));
+  ASSERT(!name.is(scratch2));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6841,8 +7069,7 @@
     ASSERT(StringDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
 
-    __ mov(at, scratch2);
-    __ sll(scratch2, scratch2, 1);
+    __ sll(at, scratch2, 1);
     __ Addu(scratch2, scratch2, at);
 
     // Check if the key is identical to the name.
@@ -6854,23 +7081,32 @@
 
   const int spill_mask =
       (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
-       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
       ~(scratch1.bit() | scratch2.bit());
 
   __ MultiPush(spill_mask);
-  __ Move(a0, elements);
-  __ Move(a1, name);
+  if (name.is(a0)) {
+    ASSERT(!elements.is(a1));
+    __ Move(a1, name);
+    __ Move(a0, elements);
+  } else {
+    __ Move(a0, elements);
+    __ Move(a1, name);
+  }
   StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
   __ CallStub(&stub);
   __ mov(scratch2, a2);
+  __ mov(at, v0);
   __ MultiPop(spill_mask);
 
-  __ Branch(done, ne, v0, Operand(zero_reg));
-  __ Branch(miss, eq, v0, Operand(zero_reg));
+  __ Branch(done, ne, at, Operand(zero_reg));
+  __ Branch(miss, eq, at, Operand(zero_reg));
 }
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  a1: key
@@ -6964,6 +7200,341 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { s2, s0, t3, EMIT_REMEMBERED_SET },
+  { s2, a2, t3, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+  // Also used in KeyedStoreIC::GenerateGeneric.
+  { a3, t0, t1, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { t0, a1, a2, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { a1, a2, a3, EMIT_REMEMBERED_SET },
+  { a3, a2, a1, EMIT_REMEMBERED_SET },
+  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { a2, a1, a3, EMIT_REMEMBERED_SET },
+  { a3, a1, a2, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { t0, a2, a3, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { a2, a3, t5, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { t2, a2, a0, EMIT_REMEMBERED_SET },
+  { a2, t2, t5, EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { t1, a0, t2, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
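+// The table above is a null-terminated list (ended by the no_reg entry) of
+// every fixed (object, value, address) register triple used by a
+// write-barrier call site in generated code, e.g.
+//
+//   { a3, t0, t1, EMIT_REMEMBERED_SET }  // object=a3, value=t0, address=t1
+//
+// IsPregenerated() below scans it, and GenerateFixedRegStubsAheadOfTime()
+// compiles one RecordWriteStub per row so that none of these stubs is ever
+// compiled during GC.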
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two branch+nop instructions are generated with labels so as to
+  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
+  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
+  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
+  // incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+  __ nop();
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+  __ nop();
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
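+// The patching trick above relies on MIPS encoding facts: "bne zero_reg,
+// zero_reg, L" never branches and so acts as a nop, while "beq zero_reg,
+// zero_reg, L" always branches.  Toggling a single bit in each of the two
+// leading instructions therefore switches the stub between the
+// store-buffer-only fast path and the incremental or incremental-compaction
+// entry points without relocating any code.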
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(a0));
+  __ Move(address, regs_.address());
+  __ Move(a0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(a1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ lw(a1, MemOperand(address, 0));
+  }
+  __ li(a2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Look at the color of the object: if it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : element value to store
+  //  -- a1    : array literal
+  //  -- a2    : map of array literal
+  //  -- a3    : element index as smi
+  //  -- t0    : array literal index in function as smi
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(a2, t1, &double_elements);
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(a0, &smi_element);
+  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+  __ bind(&slow_elements);
+  __ Push(a1, a3, a0);
+  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
+  __ Push(t1, t0);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t2, t1, t2);
+  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sw(a0, MemOperand(t2, 0));
+  // Update the write barrier for the array store.
+  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t2, t1, t2);
+  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
+                                 &slow_elements);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+}
+
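// A minimal C++ sketch of the element-kind dispatch the stub above encodes
// (names hypothetical; each string names the corresponding label):
//
//   enum SketchKind { SMI_ONLY, FAST, DOUBLE };
//   const char* Dispatch(SketchKind kind, bool value_is_smi) {
//     if (kind == DOUBLE)   return "double_elements: store as double";
//     if (value_is_smi)     return "smi_element: raw store, no barrier";
//     if (kind == SMI_ONLY) return "slow_elements: runtime transition";
//     return "fast_elements: store + RecordWrite barrier";
//   }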
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index aa224bc..e0954d8 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -59,6 +59,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -118,7 +137,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -217,7 +236,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -226,6 +245,70 @@
 };
 
 
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                         Register dest,
+                                         Register src,
+                                         Register count,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         int flags);
+
+
+  // Probe the symbol table for a two-character string. If the string is not
+  // found by probing, a jump to the label not_found is performed. This jump
+  // does not guarantee that the string is not in the symbol table. If the
+  // string is found, the code falls through with the string in register r0.
+  // The contents of both the c1 and c2 registers are modified. At the exit,
+  // c1 is guaranteed to contain a halfword with its low and high bytes equal
+  // to the initial contents of c1 and c2, respectively.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Register scratch5,
+                                                   Label* not_found);
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character);
+
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character);
+
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
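// A C++ sketch of the hash the three helpers above compute together: the
// Jenkins one-at-a-time function used by V8's StringHasher (the zero
// fallback value is an assumption taken from the runtime side):
//
//   uint32_t HashSequentialString(const uint8_t* chars, int length) {
//     uint32_t hash = 0;
//     for (int i = 0; i < length; i++) {  // GenerateHashInit/AddCharacter
//       hash += chars[i];
//       hash += hash << 10;
//       hash ^= hash >> 6;
//     }
//     hash += hash << 3;                  // GenerateHashGetHash
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     return hash == 0 ? 27 : hash;       // hash must be non-zero
//   }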
+
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
@@ -324,7 +407,15 @@
       : the_int_(the_int),
         the_heap_number_(the_heap_number),
         scratch_(scratch),
-        sign_(scratch2) { }
+        sign_(scratch2) {
+    ASSERT(IntRegisterBits::is_valid(the_int_.code()));
+    ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
+    ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
+    ASSERT(SignRegisterBits::is_valid(sign_.code()));
+  }
+
+  bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
 
  private:
   Register the_int_;
@@ -336,13 +427,15 @@
   class IntRegisterBits: public BitField<int, 0, 4> {};
   class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
   class ScratchRegisterBits: public BitField<int, 8, 4> {};
+  class SignRegisterBits: public BitField<int, 12, 4> {};
 
   Major MajorKey() { return WriteInt32ToHeapNumber; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return IntRegisterBits::encode(the_int_.code())
            | HeapNumberRegisterBits::encode(the_heap_number_.code())
-           | ScratchRegisterBits::encode(scratch_.code());
+           | ScratchRegisterBits::encode(scratch_.code())
+           | SignRegisterBits::encode(sign_.code());
   }
 
   void Generate(MacroAssembler* masm);
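// What the BitField-based MinorKey() above evaluates to, as a plain C++
// sketch (each register code must fit in 4 bits; the constructor ASSERTs
// guard exactly that):
//
//   int EncodeMinorKey(int int_reg, int heap_num_reg,
//                      int scratch_reg, int sign_reg) {
//     return int_reg | (heap_num_reg << 4) | (scratch_reg << 8)
//                    | (sign_reg << 12);  // a unique 16-bit stub key
//   }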
@@ -375,6 +468,208 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
+        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+    ASSERT(Assembler::IsBne(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
+        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+    ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
+  }
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   2 * Assembler::kInstrSize);
+
+    if (Assembler::IsBeq(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(Assembler::IsBne(first_instruction));
+
+    if (Assembler::IsBeq(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(Assembler::IsBne(second_instruction));
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(FPU);
+        masm->MultiPushFPU(kCallerSavedFPU);
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(FPU);
+        masm->MultiPopFPU(kCallerSavedFPU);
+      }
+      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    Register GetRegThatIsNotOneOf(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 5> {};
+  class ValueBits: public BitField<int, 5, 5> {};
+  class AddressBits: public BitField<int, 10, 5> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
+
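// The stub's mode is encoded in its first two branch slots: each slot holds
// either beq zero_reg, zero_reg (always taken) or bne zero_reg, zero_reg
// (never taken, i.e. a nop); Patch() flips one into the other in place.
// GetMode() above therefore reduces to this sketch:
//
//   RecordWriteStub::Mode DecodeMode(bool first_is_beq, bool second_is_beq) {
//     if (first_is_beq) return RecordWriteStub::INCREMENTAL;
//     return second_is_beq ? RecordWriteStub::INCREMENTAL_COMPACTION
//                          : RecordWriteStub::STORE_BUFFER_ONLY;
//   }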
+
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM and MIPS.
@@ -561,14 +856,13 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register receiver,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -578,6 +872,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -590,7 +886,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 4400b64..0b68384 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -30,22 +30,379 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
+
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  // Set transitioned map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required;
+  bool fpu_supported = CpuFeatures::IsSupported(FPU);
+  __ push(ra);
+
+  Register scratch = t6;
+
+  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: source FixedArray
+  // t1: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ sll(scratch, t1, 2);
+  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
+  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  // t2: destination FixedDoubleArray, not tagged as heap object
+  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  // Set destination FixedDoubleArray's length.
+  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+  // Update receiver's map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ Addu(a3, t2, Operand(kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(a2,
+                      JSObject::kElementsOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Prepare for conversion loop.
+  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
+  __ sll(t2, t1, 2);
+  __ Addu(t2, t2, t3);
+  __ li(t0, Operand(kHoleNanLower32));
+  __ li(t1, Operand(kHoleNanUpper32));
+  // t0: kHoleNanLower32
+  // t1: kHoleNanUpper32
+  // t2: end of destination FixedDoubleArray, not tagged
+  // t3: begin of FixedDoubleArray element fields, not tagged
+
+  if (!fpu_supported) __ Push(a1, a0);
+
+  __ Branch(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(ra);
+  __ Branch(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ lw(t5, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+  // t5: current element
+  __ JumpIfNotSmi(t5, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  __ SmiUntag(t5);
+  if (fpu_supported) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(t5, f0);
+    __ cvt_d_w(f0, f0);
+    __ sdc1(f0, MemOperand(t3));
+    __ Addu(t3, t3, kDoubleSize);
+  } else {
+    FloatingPointHelper::ConvertIntToDouble(masm,
+                                            t5,
+                                            FloatingPointHelper::kCoreRegisters,
+                                            f0,
+                                            a0,
+                                            a1,
+                                            t7,
+                                            f0);
+    __ sw(a0, MemOperand(t3));  // mantissa
+    __ sw(a1, MemOperand(t3, kIntSize));  // exponent
+    __ Addu(t3, t3, kDoubleSize);
+  }
+  __ Branch(&entry);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+  }
+  __ sw(t0, MemOperand(t3));  // mantissa
+  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
+  __ Addu(t3, t3, kDoubleSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, t3, Operand(t2));
+
+  if (!fpu_supported) __ Pop(a1, a0);
+  __ pop(ra);
+}
+
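// A host-side sketch of the conversion loop above, assuming 32-bit smis
// tagged with a low 0 bit and hole_nan standing in for the
// kHoleNanUpper32:kHoleNanLower32 bit pattern:
//
//   void SmiArrayToDoubleArray(const int32_t* src, double* dst, int len,
//                              double hole_nan) {
//     for (int i = 0; i < len; i++) {
//       if ((src[i] & 1) == 0) {                      // smi: tag bit is 0
//         dst[i] = static_cast<double>(src[i] >> 1);  // untag, then convert
//       } else {
//         dst[i] = hole_nan;  // non-smi in a smi-only array must be the hole
//       }
//     }
//   }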
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : target map, scratch for subsequent call
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label entry, loop, convert_hole, gc_required;
+  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: source FixedArray
+  // t1: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  __ sll(a0, t1, 1);
+  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
+  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+  // t2: destination FixedArray, not tagged as heap object
+  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  // Set destination FixedArray's length.
+  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+
+  // Prepare for conversion loop.
+  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
+  __ Addu(t2, t2, Operand(kHeapObjectTag));
+  __ sll(t1, t1, 1);
+  __ Addu(t1, a3, t1);
+  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses.
+  // a3: begin of destination FixedArray element fields, not tagged
+  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+  // t1: end of destination FixedArray, not tagged
+  // t2: destination FixedArray
+  // t3: the-hole pointer
+  // t5: heap number map
+  __ Branch(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+
+  __ Branch(fail);
+
+  __ bind(&loop);
+  __ lw(a1, MemOperand(t0));
+  __ Addu(t0, t0, kDoubleSize);
+  // a1: current element's upper 32 bits
+  // t0: address of next element's upper 32 bits
+  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
+  // a2: new heap number
+  __ lw(a0, MemOperand(t0, -12));
+  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
+  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
+  __ mov(a0, a3);
+  __ sw(a2, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+  __ RecordWrite(t2,
+                 a0,
+                 a2,
+                 kRAHasBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Branch(&entry);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ sw(t3, MemOperand(a3));
+  __ Addu(a3, a3, kIntSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, a3, Operand(t1));
+
+  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  // Update receiver's map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(a2,
+                      JSObject::kElementsOffset,
+                      t2,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(ra);
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ And(at, result, Operand(kIsIndirectStringMask));
+  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ And(at, result, Operand(kSlicedNotConsMask));
+  __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ sra(at, result, kSmiTagSize);
+  __ Addu(index, index, at);
+  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right-hand side is the empty string (i.e., whether
+  // this is really a flat string in a cons string). If that is not the
+  // case, we would rather go to the runtime system now to flatten the
+  // string.
+  __ bind(&cons_string);
+  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+  __ Branch(call_runtime, ne, result, Operand(at));
+  // Get the first of the two strings and load its instance type.
+  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, result, Operand(kStringRepresentationMask));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+  // Prepare sequential strings.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ Addu(string,
+          string,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, "external string expected, but not found",
+        at, Operand(zero_reg));
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ And(at, result, Operand(kShortExternalStringMask));
+  __ Branch(call_runtime, ne, at, Operand(zero_reg));
+  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label ascii, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ And(at, result, Operand(kStringEncodingMask));
+  __ Branch(&ascii, ne, at, Operand(zero_reg));
+  // Two-byte string.
+  __ sll(at, index, 1);
+  __ Addu(at, string, at);
+  __ lhu(result, MemOperand(at));
+  __ jmp(&done);
+  __ bind(&ascii);
+  // Ascii string.
+  __ Addu(at, string, index);
+  __ lbu(result, MemOperand(at));
+  __ bind(&done);
+}
+
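// A C++ sketch of the dispatch StringCharLoadGenerator emits (method names
// hypothetical; short external strings and non-flat cons strings take the
// call_runtime exit instead):
//
//   uint16_t CharAt(String s, int index) {
//     if (s.IsSliced())    { index += s.Offset(); s = s.Parent(); }
//     else if (s.IsCons()) { s = s.First(); }  // second must be empty
//     const void* data = s.IsExternal() ? s.ResourceData()
//                                       : s.SequentialChars();
//     return s.IsTwoByte()
//         ? static_cast<const uint16_t*>(data)[index]  // lhu path
//         : static_cast<const uint8_t*>(data)[index];  // lbu path
//   }
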
+#undef __
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index a8de9c8..e704c4f 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -31,7 +31,6 @@
 
 
 #include "ast.h"
-#include "code-stubs-mips.h"
 #include "ic-inl.h"
 
 namespace v8 {
@@ -71,26 +70,26 @@
                               int pos,
                               bool right_here = false);
 
-  // Constants related to patching of inlined load/store.
-  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
-    // This is in correlation with the padding in MacroAssembler::Abort.
-    return FLAG_debug_code ? 45 : 20;
-  }
-
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
-
-  static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
-    // Magic number 5: instruction count after patched map load:
-    //  li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
-    return Isolate::Current()->inlined_write_barrier_size() + 5;
-  }
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index d76ae59..4f486c1 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -50,13 +50,13 @@
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
 // Use floating-point coprocessor instructions. This flag is raised when
 // -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
+const bool IsMipsSoftFloatABI = false;
 #elif(defined(__mips_soft_float) && __mips_soft_float != 0)
 // Not using floating-point coprocessor instructions. This flag is raised when
 // -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
+const bool IsMipsSoftFloatABI = true;
 #else
-static const bool IsMipsSoftFloatABI = true;
+const bool IsMipsSoftFloatABI = true;
 #endif
 
 
@@ -74,46 +74,45 @@
 // Registers and FPURegisters.
 
 // Number of general purpose registers.
-static const int kNumRegisters = 32;
-static const int kInvalidRegister = -1;
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
 
 // Number of registers with HI, LO, and pc.
-static const int kNumSimuRegisters = 35;
+const int kNumSimuRegisters = 35;
 
 // In the simulator, the PC register is simulated as the 34th register.
-static const int kPCRegister = 34;
+const int kPCRegister = 34;
 
 // Number of coprocessor registers.
-static const int kNumFPURegisters = 32;
-static const int kInvalidFPURegister = -1;
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
 
 // FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-static const int kFCSRRegister = 31;
-static const int kInvalidFPUControlRegister = -1;
-static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
 
 // FCSR constants.
-static const uint32_t kFCSRInexactFlagBit = 2;
-static const uint32_t kFCSRUnderflowFlagBit = 3;
-static const uint32_t kFCSROverflowFlagBit = 4;
-static const uint32_t kFCSRDivideByZeroFlagBit = 5;
-static const uint32_t kFCSRInvalidOpFlagBit = 6;
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
 
-static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
-static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
-static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
-static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
-static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
 
-static const uint32_t kFCSRFlagMask =
+const uint32_t kFCSRFlagMask =
     kFCSRInexactFlagMask |
     kFCSRUnderflowFlagMask |
     kFCSROverflowFlagMask |
     kFCSRDivideByZeroFlagMask |
     kFCSRInvalidOpFlagMask;
 
-static const uint32_t kFCSRExceptionFlagMask =
-    kFCSRFlagMask ^ kFCSRInexactFlagMask;
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
 
 // Helper functions for converting between register numbers and names.
 class Registers {
@@ -177,67 +176,66 @@
 //   instructions (see Assembler::stop()).
 // - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
 //   debugger.
-static const uint32_t kMaxWatchpointCode = 31;
-static const uint32_t kMaxStopCode = 127;
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
 STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
 
 
 // ----- Fields offset and length.
-static const int kOpcodeShift   = 26;
-static const int kOpcodeBits    = 6;
-static const int kRsShift       = 21;
-static const int kRsBits        = 5;
-static const int kRtShift       = 16;
-static const int kRtBits        = 5;
-static const int kRdShift       = 11;
-static const int kRdBits        = 5;
-static const int kSaShift       = 6;
-static const int kSaBits        = 5;
-static const int kFunctionShift = 0;
-static const int kFunctionBits  = 6;
-static const int kLuiShift      = 16;
+const int kOpcodeShift   = 26;
+const int kOpcodeBits    = 6;
+const int kRsShift       = 21;
+const int kRsBits        = 5;
+const int kRtShift       = 16;
+const int kRtBits        = 5;
+const int kRdShift       = 11;
+const int kRdBits        = 5;
+const int kSaShift       = 6;
+const int kSaBits        = 5;
+const int kFunctionShift = 0;
+const int kFunctionBits  = 6;
+const int kLuiShift      = 16;
 
-static const int kImm16Shift = 0;
-static const int kImm16Bits  = 16;
-static const int kImm26Shift = 0;
-static const int kImm26Bits  = 26;
-static const int kImm28Shift = 0;
-static const int kImm28Bits  = 28;
+const int kImm16Shift = 0;
+const int kImm16Bits  = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits  = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits  = 28;
 
 // In branches and jumps immediate fields point to words, not bytes,
 // and are therefore shifted by 2.
-static const int kImmFieldShift = 2;
+const int kImmFieldShift = 2;
 
-static const int kFsShift       = 11;
-static const int kFsBits        = 5;
-static const int kFtShift       = 16;
-static const int kFtBits        = 5;
-static const int kFdShift       = 6;
-static const int kFdBits        = 5;
-static const int kFCccShift     = 8;
-static const int kFCccBits      = 3;
-static const int kFBccShift     = 18;
-static const int kFBccBits      = 3;
-static const int kFBtrueShift   = 16;
-static const int kFBtrueBits    = 1;
+const int kFsShift       = 11;
+const int kFsBits        = 5;
+const int kFtShift       = 16;
+const int kFtBits        = 5;
+const int kFdShift       = 6;
+const int kFdBits        = 5;
+const int kFCccShift     = 8;
+const int kFCccBits      = 3;
+const int kFBccShift     = 18;
+const int kFBccBits      = 3;
+const int kFBtrueShift   = 16;
+const int kFBtrueBits    = 1;
 
 // ----- Miscellaneous useful masks.
 // Instruction bit masks.
-static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
-static const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
-static const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
-static const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
-static const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
-static const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
-static const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
-static const int  kFunctionFieldMask =
-    ((1 << kFunctionBits) - 1) << kFunctionShift;
+const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
+const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
+const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
+const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
+const int  kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
 // Misc masks.
-static const int  kHiMask       =   0xffff << 16;
-static const int  kLoMask       =   0xffff;
-static const int  kSignMask     =   0x80000000;
-static const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int  kHiMask       =   0xffff << 16;
+const int  kLoMask       =   0xffff;
+const int  kSignMask     =   0x80000000;
+const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
 
 // ----- MIPS Opcodes and Function Fields.
 // We use this presentation to stay close to the table representation in
@@ -529,7 +527,7 @@
   kRoundToMinusInf = RM
 };
 
-static const uint32_t kFPURoundingModeMask = 3 << 0;
+const uint32_t kFPURoundingModeMask = 3 << 0;
 
 enum CheckForInexactConversion {
   kCheckForInexactConversion,
@@ -772,18 +770,18 @@
 // MIPS assembly various constants.
 
 // C/C++ argument slots size.
-static const int kCArgSlotCount = 4;
-static const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
 // JS argument slots size.
-static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
 // Assembly builtins argument slots size.
-static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
 
-static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-static const int kDoubleAlignmentBits = 3;
-static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
-static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+const int kDoubleAlignmentBits = 3;
+const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+const int kDoubleAlignmentMask = kDoubleAlignment - 1;
 
 
 } }   // namespace v8::internal
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 26e95fb..5c3912e 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -64,15 +64,19 @@
   }
 
 #if !defined (USE_SIMULATOR)
+#if defined(ANDROID)
+  // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+  char* end = reinterpret_cast<char*>(start) + size;
+  cacheflush(
+      reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
+#else  // ANDROID
   int res;
-
   // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
   res = syscall(__NR_cacheflush, start, size, ICACHE);
-
   if (res) {
     V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
   }
-
+#endif  // ANDROID
 #else  // USE_SIMULATOR.
   // Not generating mips instructions for C-code. This means that we are
   // building a mips emulator based target.  We should notify the simulator
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index e323c50..34e333d 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -124,55 +124,58 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  if ((object_regs | non_object_regs) != 0) {
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ And(at, reg, 0xc0000000);
-          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, causing them to be untouched by GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ And(at, reg, 0xc0000000);
+            __ Assert(
+                eq, "Unable to encode value as smi", at, Operand(zero_reg));
+          }
+          __ sll(reg, reg, kSmiTagSize);
         }
-        __ sll(reg, reg, kSmiTagSize);
       }
+      __ MultiPush(object_regs | non_object_regs);
     }
-    __ MultiPush(object_regs | non_object_regs);
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(a0, zero_reg);  // No arguments.
-  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+    __ mov(a0, zero_reg);  // No arguments.
+    __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  if ((object_regs | non_object_regs) != 0) {
-    __ MultiPop(object_regs | non_object_regs);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ srl(reg, reg, kSmiTagSize);
-      }
-      if (FLAG_debug_code &&
-          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-        __ li(reg, kDebugZapValue);
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ MultiPop(object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ srl(reg, reg, kSmiTagSize);
+        }
+        if (FLAG_debug_code &&
+            (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+          __ li(reg, kDebugZapValue);
+        }
       }
     }
-  }
 
-  __ LeaveInternalFrame();
+    // Leave the internal frame.
+  }
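// Why the sll/srl by kSmiTagSize works: on 32-bit V8 a smi is the value
// shifted left one bit with a 0 tag bit, so the GC treats the pushed word
// as a non-pointer. The 0xc0000000 check above guarantees the raw value
// fits in 30 bits, so tagging round-trips:
//
//   uint32_t AsSmi(uint32_t raw)   { return raw << 1; }  // push as smi
//   uint32_t FromSmi(uint32_t smi) { return smi >> 1; }  // pop and untag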
 
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
@@ -256,11 +259,11 @@
 }
 
 
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- a1 : function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0);
+  Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
 }
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 18b6231..a27c61c 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -32,65 +32,748 @@
 #include "full-codegen.h"
 #include "safepoint-table.h"
 
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
 namespace v8 {
 namespace internal {
 
 
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 32;
 
 
 int Deoptimizer::patch_size() {
-  const int kCallInstructionSizeInWords = 3;
+  const int kCallInstructionSizeInWords = 4;
   return kCallInstructionSizeInWords * Assembler::kInstrSize;
 }
 
 
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  UNIMPLEMENTED();
+  HandleScope scope;
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+  Address code_start_address = code->instruction_start();
+
+  // Invalidate the relocation information, as it will become stale after the
+  // code patching below and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each LLazyBailout instruction, insert a call to the corresponding
+  // deoptimization entry.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+                                                      RelocInfo::NONE);
+    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+    ASSERT(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(call_address, call_size_in_words);
+    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+    ASSERT(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    ASSERT(call_address + patch_size() <= code->instruction_end());
+
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
+  }
+
+  Isolate* isolate = code->GetIsolate();
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  DeoptimizerData* data = isolate->deoptimizer_data();
+  node->set_next(data->deoptimizing_code_list_);
+  data->deoptimizing_code_list_ = node;
+
+  // We might be in the middle of incremental marking with compaction.
+  // Tell the collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
+  // Set the code for the function to the non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+    if (FLAG_print_code) {
+      code->PrintLn();
+    }
+#endif
+  }
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
-  UNIMPLEMENTED();
+  const int kInstrSize = Assembler::kInstrSize;
+  // This structure comes from FullCodeGenerator::EmitStackCheck.
+  // The call of the stack guard check has the following form:
+  // sltu at, sp, t0
+  // beq at, zero_reg, ok
+  // lui t9, <stack guard address> upper
+  // ori t9, <stack guard address> lower
+  // jalr t9
+  // nop
+  // ----- pc_after points here
+
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+  // Replace the sltu instruction with a load-immediate of 1 into at, so the
+  // beq is not taken.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->addiu(at, zero_reg, 1);
+
+  // Replace the stack check address in the load-immediate (lui/ori pair)
+  // with the entry address of the replacement code.
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+      reinterpret_cast<uint32_t>(check_code->entry()));
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   replacement_code->entry());
+
+  // We patched the code to the following form:
+  // addiu at, zero_reg, 1
+  // beq at, zero_reg, ok  ;; Not changed
+  // lui t9, <on-stack replacement address> upper
+  // ori t9, <on-stack replacement address> lower
+  // jalr t9  ;; Not changed
+  // nop  ;; Not changed
+  // ----- pc_after points here
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
-  UNIMPLEMENTED();
+  // Exact opposite of the function above.
+  const int kInstrSize = Assembler::kInstrSize;
+  ASSERT(Assembler::IsAddImmediate(
+      Assembler::instr_at(pc_after - 6 * kInstrSize)));
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+
+  // Restore the sltu instruction so beq can be taken again.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->sltu(at, sp, t0);
+
+  // Replace the on-stack replacement address in the load-immediate (lui/ori
+  // pair) with the entry address of the normal stack-check code.
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+      reinterpret_cast<uint32_t>(replacement_code->entry()));
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, check_code);
+}
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
 }
 
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
-  UNIMPLEMENTED();
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Set them up explicitly.
+  for (int i = StandardFrameConstants::kCallerPCOffset;
+       ok && i >= StandardFrameConstants::kMarkerOffset;
+       i -= kPointerSize) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      const char* name = "UNKNOWN";
+      switch (i) {
+        case StandardFrameConstants::kCallerPCOffset:
+          name = "caller's pc";
+          break;
+        case StandardFrameConstants::kCallerFPOffset:
+          name = "fp";
+          break;
+        case StandardFrameConstants::kContextOffset:
+          name = "context";
+          break;
+        case StandardFrameConstants::kMarkerOffset:
+          name = "function";
+          break;
+      }
+      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
+             output_offset,
+             input_value,
+             input_offset,
+             name);
+    }
+
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
+    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
 }
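+
+// Roughly, the single OSR output frame built above holds, from high to low
+// addresses: the translated incoming parameters, the caller's pc and fp,
+// the context, the function marker, and the translated locals and stack
+// slots. If any translation command fails, the unoptimized input frame is
+// reused instead.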
 
 
+// This code is very similar to the ia32/arm code, but relies on the MIPS
+// register names (fp, sp) and on how the MIPS frame is laid out.
 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
                                  int frame_index) {
-  UNIMPLEMENTED();
-}
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
 
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    output_frame->SetRegister(fp.code(), fp_value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // For the bottommost output frame the context can be taken from the input
+  // frame. For all subsequent output frames it can be taken from the function
+  // so long as we don't inline functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = reinterpret_cast<intptr_t>(function->context());
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) {
+    output_frame->SetRegister(cp.code(), value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost && bailout_type_ != DEBUGGER) {
+    Builtins* builtins = isolate_->builtins();
+    Code* continuation = (bailout_type_ == EAGER)
+        ? builtins->builtin(Builtins::kNotifyDeoptimized)
+        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+}
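+
+// The output frame built above is laid out, from highest to lowest address,
+// roughly as:
+//   incoming parameters (parameter_count slots, including the receiver)
+//   caller's pc
+//   caller's fp           <- fp_value points here
+//   context
+//   function
+//   locals and expression stack (height slots), ending at top_address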
 
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  UNIMPLEMENTED();
+  // Set the register values. The values are not important as there are no
+  // callee-saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values, though.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+  }
 }
 
 
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
-  UNIMPLEMENTED();
+  GeneratePrologue();
+
+  Isolate* isolate = masm()->isolate();
+
+  CpuFeatures::Scope scope(FPU);
+  // Unlike on ARM we don't save all the registers, just the useful ones.
+  // For the rest, there are gaps on the stack, so the offsets remain the same.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * FPURegister::kNumAllocatableRegisters;
+
+  // Save all FPU registers before messing with them.
+  __ Subu(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ sdc1(fpu_reg, MemOperand(sp, offset));
+  }
+
+  // Push saved_regs (needed to populate FrameDescription::registers_).
+  // Leave gaps for other registers.
+  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+    if ((saved_regs & (1 << i)) != 0) {
+      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+    }
+  }
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
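+  // At this point the stack, from sp upwards, holds roughly:
+  // kNumberOfRegisters words of saved GP registers (with gaps for the
+  // unsaved ones), kDoubleRegsSize bytes of saved FPU registers, the
+  // bailout id pushed by the entry table and, for lazy deoptimization,
+  // the return address.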
+  // Get the bailout id from the stack.
+  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object if possible (a3)
+  // (the return address for lazy deoptimization) and compute the fp-to-sp
+  // delta in register t0.
+  if (type() == EAGER) {
+    __ mov(a3, zero_reg);
+    // Correct one word for bailout id.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else if (type() == OSR) {
+    __ mov(a3, ra);
+    // Correct one word for bailout id.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ mov(a3, ra);
+    // Correct two words for bailout id and return address.
+    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  __ Subu(t0, fp, t0);
+
+  // Allocate a new deoptimizer object.
+  // Pass four arguments in a0 to a3, and the fifth and sixth on the stack.
+  __ PrepareCallCFunction(6, t1);
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ li(a1, Operand(type()));  // Bailout type.
+  // a2: bailout id already loaded.
+  // a3: code address or 0 already loaded.
+  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
+  __ li(t1, Operand(ExternalReference::isolate_address()));
+  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
+  // Call Deoptimizer::New().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
+
+  // Preserve the "deoptimizer" object in register v0 and get the input
+  // frame descriptor pointer into a1 (deoptimizer->input_).
+  // Move the deoptimizer object to a0 for the call to
+  // Deoptimizer::ComputeOutputFrames() below.
+  __ mov(a0, v0);
+  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((saved_regs & (1 << i)) != 0) {
+      __ lw(a2, MemOperand(sp, i * kPointerSize));
+      __ sw(a2, MemOperand(a1, offset));
+    } else if (FLAG_debug_code) {
+      __ li(a2, kDebugZapValue);
+      __ sw(a2, MemOperand(a1, offset));
+    }
+  }
+
+  // Copy FPU registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ ldc1(f0, MemOperand(sp, src_offset));
+    __ sdc1(f0, MemOperand(a1, dst_offset));
+  }
+
+  // Remove the bailout id, the return address (if any), and the saved
+  // registers from the stack.
+  if (type() == EAGER || type() == OSR) {
+    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  // Compute a pointer to the unwinding limit in register a2; that is
+  // the first stack slot not part of the input frame.
+  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+  __ Addu(a2, a2, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(t0);
+  __ sw(t0, MemOperand(a3, 0));
+  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
+  __ addiu(a3, a3, sizeof(uint32_t));  // In delay slot.
+
+  // Compute the output frame in the deoptimizer.
+  __ push(a0);  // Preserve deoptimizer object across call.
+  // a0: deoptimizer object; a1: scratch.
+  __ PrepareCallCFunction(1, a1);
+  // Call Deoptimizer::ComputeOutputFrames().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
+  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: a0 = current "FrameDescription** output_",
+  // a1 = one past the last FrameDescription**.
+  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
+  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
+  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
+  __ bind(&outer_push_loop);
+  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
+  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+  __ Addu(t2, a2, Operand(a3));
+  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+  __ push(t3);
+  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+  __ Addu(a0, a0, Operand(kPointerSize));
+  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+    __ push(t2);
+  }
+
+  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+  __ push(t2);
+  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+  __ push(t2);
+
+  // Technically, restoring 'at' should work unless zero_reg is also
+  // restored, but it's safer to assert that it is not.
+  ASSERT(!(at.bit() & restored_regs));
+  // Restore the registers from the last output frame.
+  __ mov(at, a2);
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((restored_regs & (1 << i)) != 0) {
+      __ lw(ToRegister(i), MemOperand(at, offset));
+    }
+  }
+
+  // Set up the roots register.
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(isolate);
+  __ li(roots, Operand(roots_array_start));
+
+  __ pop(at);  // Get continuation, leave pc on stack.
+  __ pop(ra);
+  __ Jump(at);
+  __ stop("Unreachable.");
 }
 
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-  UNIMPLEMENTED();
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may still be live.
+
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    if (type() != EAGER) {
+      // Emulate an ia32-style call by pushing the return address onto the
+      // stack.
+      __ push(ra);
+    }
+    __ li(at, Operand(i));
+    __ push(at);
+    __ Branch(&done);
+
+    // Pad the rest of the code.
+    while (table_entry_size_ > (masm()->pc_offset() - start)) {
+      __ nop();
+    }
+
+    ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
+  }
+  __ bind(&done);
 }
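+
+// Each entry above occupies exactly table_entry_size_ bytes, so the entry
+// for bailout id i sits, roughly, at
+//   entry_address = table_start + i * table_entry_size_
+// and the id pushed by the entry lets the common Generate() path recover i.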
 
+#undef __
+
 
 } }  // namespace v8::internal
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 2c83893..9e626f3 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -36,9 +36,9 @@
 // Register lists.
 // Note that the bit values must match those used in actual instruction
 // encoding.
-static const int kNumRegs = 32;
+const int kNumRegs = 32;
 
-static const RegList kJSCallerSaved =
+const RegList kJSCallerSaved =
   1 << 2  |  // v0
   1 << 3  |  // v1
   1 << 4  |  // a0
@@ -54,7 +54,7 @@
   1 << 14 |  // t6
   1 << 15;   // t7
 
-static const int kNumJSCallerSaved = 14;
+const int kNumJSCallerSaved = 14;
 
 
 // Return the code of the n-th caller-saved register available to JavaScript
@@ -63,7 +63,7 @@
 
 
 // Callee-saved registers preserved when switching from C to JavaScript.
-static const RegList kCalleeSaved =
+const RegList kCalleeSaved =
   1 << 16 |  // s0
   1 << 17 |  // s1
   1 << 18 |  // s2
@@ -74,9 +74,9 @@
   1 << 23 |  // s7 (cp in Javascript code)
   1 << 30;   // fp/s8
 
-static const int kNumCalleeSaved = 9;
+const int kNumCalleeSaved = 9;
 
-static const RegList kCalleeSavedFPU =
+const RegList kCalleeSavedFPU =
   1 << 20 |  // f20
   1 << 22 |  // f22
   1 << 24 |  // f24
@@ -84,23 +84,37 @@
   1 << 28 |  // f28
   1 << 30;   // f30
 
-static const int kNumCalleeSavedFPU = 6;
+const int kNumCalleeSavedFPU = 6;
+
+const RegList kCallerSavedFPU =
+  1 << 0  |  // f0
+  1 << 2  |  // f2
+  1 << 4  |  // f4
+  1 << 6  |  // f6
+  1 << 8  |  // f8
+  1 << 10 |  // f10
+  1 << 12 |  // f12
+  1 << 14 |  // f14
+  1 << 16 |  // f16
+  1 << 18;   // f18
+
+
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
-static const int kNumSafepointRegisters = 24;
+const int kNumSafepointRegisters = 24;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-static const int kNumSafepointSavedRegisters =
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters =
     kNumJSCallerSaved + kNumCalleeSaved;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
-static const int kUndefIndex = -1;
+const int kUndefIndex = -1;
 // Map with stack indexes that correspond to the codes of saved registers.
-static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {
   kUndefIndex,  // zero_reg
   kUndefIndex,  // at
   0,   // v0
@@ -140,13 +154,13 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kStateOffset   = 1 * kPointerSize;
-  static const int kContextOffset = 2 * kPointerSize;
-  static const int kFPOffset      = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
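+
+// With the new kCodeOffset slot, a handler on the stack occupies five words:
+//   sp[0]: next handler    sp[1]: code object    sp[2]: state
+//   sp[3]: context         sp[4]: frame pointer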
 
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 9a210c4..f5b851d 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -47,6 +47,7 @@
 #include "stub-cache.h"
 
 #include "mips/code-stubs-mips.h"
+#include "mips/macro-assembler-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -54,17 +55,14 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
-// bit immediate value is used) is the delta from the pc to the first
+// marker is an "andi zero_reg, rx, #yyyy" instruction; rx * 0x0000ffff + yyyy
+// (raw 16 bit immediate value is used) is the delta from the pc to the first
 // instruction of the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
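+// For example, a delta of 100 instructions yields rx = 100 / 0xffff = 0
+// (i.e. zero_reg) and yyyy = 100 % 0xffff = 100, so the marker emitted is
+//   andi zero_reg, zero_reg, 100
+// which decodes back to 0 * 0x0000ffff + 100 = 100.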
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -103,7 +101,7 @@
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
       Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
-      __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+      __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
 #ifdef DEBUG
       info_emitted_ = true;
 #endif
@@ -139,6 +137,8 @@
   ASSERT(info_ == NULL);
   info_ = info;
   scope_ = info->scope();
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -153,7 +153,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). t1 is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
     __ Branch(&ok, eq, t1, Operand(zero_reg));
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
@@ -162,6 +162,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(ra, fp, cp, a1);
@@ -207,14 +212,12 @@
         // Load parameter from stack.
         __ lw(a0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ li(a1, Operand(Context::SlotOffset(var->index())));
-        __ addu(a2, cp, a1);
-        __ sw(a0, MemOperand(a2, 0));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(a2, cp);
-        __ RecordWrite(a2, a1, a3);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sw(a0, target);
+
+        // Update the write barrier.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
       }
     }
   }
@@ -242,7 +245,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (is_strict_mode()) {
+    if (!is_classic_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -272,7 +275,10 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -310,17 +316,25 @@
 
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  // The generated code is used in Deoptimizer::PatchStackCheckCodeAt, so we
+  // need to make sure the emitted instruction sequence is constant. Branch
+  // may emit a skip-or-jump sequence instead of the normal Branch; the
+  // "skip" part of that sequence is about as long as this Branch would be,
+  // so it is safe to ignore it.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
   __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-  __ Branch(&ok, hs, sp, Operand(t0));
+  __ sltu(at, sp, t0);
+  __ beq(at, zero_reg, &ok);
+  // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
   StackCheckStub stub;
+  __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
 
-  __ CallStub(&stub);
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
@@ -393,7 +407,7 @@
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -416,7 +430,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -451,7 +465,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -510,7 +524,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -577,7 +591,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -670,15 +684,17 @@
   __ sw(src, location);
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWrite(scratch0,
-                   Operand(Context::SlotOffset(var->index())),
-                   scratch1,
-                   src);
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kRAHasBeenSaved,
+                              kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -689,13 +705,7 @@
 
   Label skip;
   if (should_normalize) __ Branch(&skip);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ LoadRoot(t0, Heap::kTrueValueRootIndex);
     Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -705,13 +715,15 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
+                                        VariableMode mode,
                                         FunctionLiteral* function,
                                         int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -723,7 +735,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ sw(result_register(), StackOperand(variable));
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
           Comment cmnt(masm_, "[ Declaration");
           __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
           __ sw(t0, StackOperand(variable));
@@ -750,10 +762,16 @@
         __ sw(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ mov(a1, cp);
-        __ RecordWrite(a1, Operand(offset), a2, result_register());
+        __ RecordWriteContextSlot(cp,
+                                  offset,
+                                  result_register(),
+                                  a2,
+                                  kRAHasBeenSaved,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
           Comment cmnt(masm_, "[ Declaration");
           __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
           __ sw(at, ContextOperand(cp, variable->index()));
@@ -765,11 +783,13 @@
     case Variable::LOOKUP: {
       Comment cmnt(masm_, "[ Declaration");
       __ li(a2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
+        ? READ_ONLY : NONE;
       __ li(a1, Operand(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -779,7 +799,7 @@
         __ Push(cp, a2, a1);
         // Push initial value for function declaration.
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
           __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
           __ Push(cp, a2, a1, a0);
       } else {
@@ -922,11 +942,17 @@
   __ bind(&done_convert);
   __ push(a0);
 
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
+  Label next;
   // Preload a couple of values used in the loop.
   Register  empty_fixed_array_value = t2;
   __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -1000,9 +1026,16 @@
   __ jmp(&loop);
 
   // We got a fixed array in register v0. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ li(a1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
-  __ Push(a1, v0);
+  __ li(a1, Operand(Smi::FromInt(1)));  // Smi indicates slow check.
+  __ lw(a2, MemOperand(sp, 0 * kPointerSize));  // Get the enumerated object.
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(a2, a3, a3);
+  __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
+  __ li(a1, Operand(Smi::FromInt(0)));  // Zero indicates proxy.
+  __ bind(&non_proxy);
+  __ Push(a1, v0);  // Smi and array.
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ li(a0, Operand(Smi::FromInt(0)));
   __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
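+  // The for-in state on the stack is now, from sp upwards: the index
+  // (smi 0), the length (smi), the fixed array, the map (or a smi in the
+  // slow/proxy case), and the enumerable object itself; the loop below
+  // reads the map at sp[3] and the enumerable at sp[4].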
@@ -1021,17 +1054,22 @@
   __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
   __ lw(a3, MemOperand(t0));  // Current entry.
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register a2.
   __ lw(a2, MemOperand(sp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ lw(a1, MemOperand(sp, 4 * kPointerSize));
   __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, t0, Operand(a2));
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  ASSERT_EQ(Smi::FromInt(0), 0);
+  __ Branch(&update_each, eq, a2, Operand(zero_reg));
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1086,7 +1124,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ li(a0, Operand(info));
     __ push(a0);
     __ CallStub(&stub);
@@ -1117,7 +1155,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
         __ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1129,7 +1167,7 @@
     }
     // If no outer scope calls eval, we do not need to check more
     // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1171,7 +1209,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
         __ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1201,17 +1239,26 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ Branch(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
       __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-      __ movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+      if (local->mode() == CONST) {
+        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+        __ movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+      } else {  // LET || CONST_HARMONY
+        __ Branch(done, ne, at, Operand(zero_reg));
+        __ li(a0, Operand(var->name()));
+        __ push(a0);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ Branch(done);
   }
@@ -1244,26 +1291,66 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        GetVar(v0, var);
-        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-        __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-        if (var->mode() == Variable::LET) {
-          Label done;
-          __ Branch(&done, ne, at, Operand(zero_reg));
-          __ li(a0, Operand(var->name()));
-          __ push(a0);
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          __ bind(&done);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
         } else {
-          __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-          __ movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+          // Check that we always have valid source positions.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        context()->Plug(v0);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          GetVar(v0, var);
+          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+          __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            Label done;
+            __ Branch(&done, ne, at, Operand(zero_reg));
+            __ li(a0, Operand(var->name()));
+            __ push(a0);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+            __ bind(&done);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are
+            // unholed.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+            __ movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+          }
+          context()->Plug(v0);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1337,10 +1424,11 @@
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ lw(a3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->constant_properties()));
+  __ li(a1, Operand(constant_properties));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1349,10 +1437,15 @@
       : ObjectLiteral::kNoFlags;
   __ li(a0, Operand(Smi::FromInt(flags)));
   __ Push(a3, a2, a1, a0);
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1387,9 +1480,9 @@
             __ mov(a0, result_register());
             __ li(a2, Operand(key->handle()));
             __ lw(a1, MemOperand(sp));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
             __ Call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1448,13 +1541,22 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
+
   __ mov(a0, result_register());
   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(expr->constant_elements()));
+  __ li(a1, Operand(constant_elements));
   __ Push(a3, a2, a1);
-  if (expr->constant_elements()->map() ==
+  if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1466,8 +1568,13 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode = has_fast_elements
+      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
 
@@ -1488,21 +1595,30 @@
       __ push(v0);
       result_saved = true;
     }
+
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ lw(a1, MemOperand(sp));  // Copy of array literal.
-    __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ sw(result_register(), FieldMemOperand(a1, offset));
-
-    // Update the write barrier for the array store with v0 as the scratch
-    // register.
-    __ RecordWrite(a1, Operand(offset), a2, result_register());
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ lw(t2, MemOperand(sp));  // Copy of array literal.
+      __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
+      __ sw(result_register(), FieldMemOperand(a1, offset));
+      // Update the write barrier for the array store.
+      __ RecordWriteField(a1, offset, result_register(), a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+    } else {
+      __ lw(a1, MemOperand(sp));  // Copy of array literal.
+      __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+      __ li(a3, Operand(Smi::FromInt(i)));
+      __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
+      __ mov(a0, result_register());
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1632,7 +1748,7 @@
   __ li(a2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name a0 and a2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1641,7 +1757,7 @@
   __ mov(a0, result_register());
   // Call keyed load IC. It has arguments key and receiver in a0 and a1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1790,9 +1906,9 @@
       __ mov(a1, result_register());
       __ pop(a0);  // Restore value.
       __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ Call(ic);
       break;
     }
@@ -1803,9 +1919,9 @@
       __ mov(a1, result_register());
       __ pop(a2);
       __ pop(a0);  // Restore value.
-      Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-        : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->KeyedStoreIC_Initialize()
+        : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ Call(ic);
       break;
     }
@@ -1822,9 +1938,9 @@
     __ mov(a0, result_register());
     __ li(a2, Operand(var->name()));
     __ lw(a1, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
@@ -1850,12 +1966,12 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(v0);  // Value.
       __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ li(a0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, a1, a0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
@@ -1875,12 +1991,14 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(a3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWrite(a1, Operand(offset), a2, a3);
+        __ RecordWriteContextSlot(
+            a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, a1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1893,13 +2011,15 @@
       __ sw(v0, location);
       if (var->IsContextSlot()) {
         __ mov(a3, v0);
-        __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(
+            a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(v0);  // Value.
       __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+      __ li(a0, Operand(Smi::FromInt(language_mode())));
       __ Push(cp, a1, a0);  // Context, name, language mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
@@ -1937,9 +2057,9 @@
     __ pop(a1);
   }
 
-  Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
   __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1989,9 +2109,9 @@
     __ pop(a2);
   }
 
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
   __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -2097,6 +2217,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
+  __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2105,8 +2226,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
@@ -2115,22 +2235,20 @@
   }
   __ push(a1);
 
-  // Push the receiver of the enclosing function and do runtime call.
+  // Push the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(a1);
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ li(a1, Operand(Smi::FromInt(strict_mode)));
+  // Push the language mode.
+  __ li(a1, Operand(Smi::FromInt(language_mode())));
   __ push(a1);
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Push the start position of the scope the call resides in.
+  __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+  __ push(a1);
+
+  // Do the runtime call.
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
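
After this rework, the resolve-eval runtime call takes five stack arguments: the copied function, the first argument (or undefined), the enclosing receiver, the language mode, and the scope's start position. A toy model of that stack discipline, using an explicit vector in place of the machine stack (purely illustrative; none of this is V8 API):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<const char*> stack;
      // Mirrors the pushes feeding Runtime::kResolvePossiblyDirectEval.
      stack.push_back("function (copy from below the args)");
      stack.push_back("first argument, or undefined");
      stack.push_back("receiver of the enclosing function");
      stack.push_back("language mode (as a smi)");
      stack.push_back("start position of the enclosing scope");
      printf("runtime call with %zu arguments\n", stack.size());  // prints 5
      return 0;
    }
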
 
 
@@ -2164,28 +2282,11 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly
-      // in generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(v0);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(a1);
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in v0 (function) and
       // v1 (receiver). Touch up the stack with the right values.
@@ -2195,6 +2296,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2308,7 +2410,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2320,7 +2423,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(t0, v0, Operand(kSmiTagMask));
   Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2328,7 +2431,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2340,7 +2444,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2348,7 +2452,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2370,7 +2475,7 @@
   __ Branch(if_false, ne, at, Operand(zero_reg));
   __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
   __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2378,7 +2483,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2392,7 +2498,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2400,7 +2506,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2416,7 +2523,7 @@
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
   __ And(at, a1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2424,8 +2531,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2501,12 +2608,13 @@
   __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2520,7 +2628,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
   __ Branch(if_false);
 
@@ -2528,7 +2636,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2542,7 +2651,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(JS_ARRAY_TYPE),
         if_true, if_false, fall_through);
 
@@ -2550,7 +2659,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2564,15 +2674,15 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2594,7 +2704,7 @@
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
   __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
         if_true, if_false, fall_through);
 
@@ -2602,7 +2712,8 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2617,14 +2728,15 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(a1);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in a1 and the formal
@@ -2638,9 +2750,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2660,7 +2771,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2671,18 +2783,23 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, one at each end of the
+  // type range for JS object types. This saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ GetObjectType(v0, v0, a1);  // Map is now in v0.
   __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Check if the constructor in the map is a function.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+  // Check if the constructor in the map is a JS function.
   __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
   __ GetObjectType(v0, a1, a1);
   __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
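
The STATIC_ASSERTs above encode the whole trick: with exactly two callable instance types, placed at the two ends of the spec-object range, "is this callable?" collapses to two equality checks instead of range comparisons. A self-contained sketch with hypothetical numeric values (the real constants live in V8's instance-type enum):

    #include <cassert>

    // Hypothetical layout mirroring the asserted ordering: the callable
    // types bracket the non-callable spec-object types.
    enum InstanceType {
      FIRST_SPEC_OBJECT_TYPE = 100,             // callable (JS_FUNCTION_TYPE)
      FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = 101,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 109,
      LAST_SPEC_OBJECT_TYPE = 110               // callable (JS_FUNCTION_PROXY_TYPE)
    };

    // Two equality checks replace the old range test for callables.
    bool IsCallableSpecObject(int type) {
      return type == FIRST_SPEC_OBJECT_TYPE || type == LAST_SPEC_OBJECT_TYPE;
    }

    int main() {
      assert(IsCallableSpecObject(FIRST_SPEC_OBJECT_TYPE));
      assert(IsCallableSpecObject(LAST_SPEC_OBJECT_TYPE));
      assert(!IsCallableSpecObject(105));  // some non-callable object type
      return 0;
    }
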
@@ -2714,7 +2831,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2722,6 +2839,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2735,9 +2853,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2760,10 +2877,10 @@
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
   if (CpuFeatures::IsSupported(FPU)) {
     __ PrepareCallCFunction(1, a0);
-    __ li(a0, Operand(ExternalReference::isolate_address()));
+    __ lw(a0, ContextOperand(cp, Context::GLOBAL_INDEX));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
     __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
-
     CpuFeatures::Scope scope(FPU);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
     __ li(a1, Operand(0x41300000));
@@ -2778,7 +2895,8 @@
   } else {
     __ PrepareCallCFunction(2, a0);
     __ mov(a0, s0);
-    __ li(a1, Operand(ExternalReference::isolate_address()));
+    __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalContextOffset));
     __ CallCFunction(
         ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
@@ -2787,9 +2905,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2799,9 +2918,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2812,7 +2932,8 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2831,8 +2952,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2842,7 +2964,8 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2861,14 +2984,17 @@
   __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+  __ mov(a2, v0);
+  __ RecordWriteField(
+      a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2880,7 +3006,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2898,7 +3025,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2907,7 +3035,6 @@
 
   Register object = a1;
   Register index = a0;
-  Register scratch = a2;
   Register result = v0;
 
   __ pop(object);
@@ -2917,7 +3044,6 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2946,7 +3072,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2955,8 +3082,7 @@
 
   Register object = a1;
   Register index = a0;
-  Register scratch1 = a2;
-  Register scratch2 = a3;
+  Register scratch = a3;
   Register result = v0;
 
   __ pop(object);
@@ -2966,8 +3092,7 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2996,9 +3121,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
-
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3008,7 +3133,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3020,10 +3146,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3032,10 +3159,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3044,10 +3172,24 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3056,8 +3198,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3065,7 +3208,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3074,18 +3218,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
+
   // InvokeFunction requires the function in a1. Move it in there.
   __ mov(a1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(a1, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(v0);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3095,7 +3252,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3154,16 +3312,31 @@
   __ sw(scratch1, MemOperand(index2, 0));
   __ sw(scratch2, MemOperand(index1, 0));
 
-  Label new_space;
-  __ InNewSpace(elements, scratch1, eq, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   scratch1,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   ne,
+                   &no_remembered_set);
   // Possible optimization: check that both values are Smis
   // (OR them together and test against the Smi mask).
 
-  __ mov(scratch1, elements);
-  __ RecordWriteHelper(elements, index1, scratch2);
-  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index1,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index2,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
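
The RememberedSetHelper pair above replaces the old RecordWriteHelper calls: swapping two elements can never leave the incremental marker mid-object, so only the old-to-new remembered set needs updating, and even that is skipped when the page is already flagged SCAN_ON_SCAVENGE. A toy model of that decision (Page and the slot set are invented for illustration; the flag name mirrors the source):

    #include <cstdio>
    #include <set>

    struct Page {
      bool scan_on_scavenge;  // page will be rescanned wholesale anyway
    };

    std::set<void**> remembered_set;  // stands in for the store buffer

    // Mirrors CheckPageFlag + RememberedSetHelper: record the slot only
    // when the page is not already scheduled for a full scavenge scan.
    void RecordSlot(Page* page, void** slot) {
      if (page->scan_on_scavenge) return;  // the &no_remembered_set branch
      remembered_set.insert(slot);
    }

    int main() {
      Page normal = {false}, flagged = {true};
      void* slot_a = 0; void* slot_b = 0;
      RecordSlot(&normal, &slot_a);   // recorded
      RecordSlot(&flagged, &slot_b);  // skipped
      printf("slots recorded: %zu\n", remembered_set.size());  // prints 1
      return 0;
    }
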
@@ -3177,7 +3350,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3230,7 +3404,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = v0;
@@ -3246,8 +3421,7 @@
   __ Branch(&ok, eq, left, Operand(right));
   // Fail if either is a non-HeapObject.
   __ And(tmp, left, Operand(right));
-  __ And(at, tmp, Operand(kSmiTagMask));
-  __ Branch(&fail, eq, at, Operand(zero_reg));
+  __ JumpIfSmi(tmp, &fail);
   __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
   __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
   __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
@@ -3267,7 +3441,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3280,14 +3455,15 @@
   __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
   __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3302,12 +3478,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3592,7 +3768,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
         __ push(a1);
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(v0);
@@ -3600,7 +3778,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ lw(a2, GlobalObjectOperand());
           __ li(a1, Operand(var->name()));
@@ -3643,18 +3821,35 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ bind(&done);
       }
       break;
     }
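
Visiting !e for control with swapped labels, as above, turns logical negation into pure label plumbing; only value contexts need the explicit true/false materialization with their extra bailout ids. The label swap in miniature (a toy control-flow emitter, not the V8 API):

    #include <cstdio>
    #include <functional>

    // Toy "VisitForControl": evaluate a condition, jump to one of two labels.
    void VisitForControl(bool cond, std::function<void()> if_true,
                         std::function<void()> if_false) {
      if (cond) if_true(); else if_false();
    }

    // Unary NOT compiles to the same visit with the labels exchanged.
    void VisitNotForControl(bool operand, std::function<void()> if_true,
                            std::function<void()> if_false) {
      VisitForControl(operand, /*if_true=*/if_false, /*if_false=*/if_true);
    }

    int main() {
      VisitNotForControl(false,
                         [] { puts("!false took the true branch"); },
                         [] { puts("!false took the false branch"); });
      return 0;
    }
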
@@ -3849,9 +4044,9 @@
       __ mov(a0, result_register());  // Value.
       __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
       __ pop(a1);  // Receiver.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3867,9 +4062,9 @@
       __ mov(a0, result_register());  // Value.
       __ pop(a1);  // Key.
       __ pop(a2);  // Receiver.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3916,19 +4111,24 @@
     context()->Plug(v0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(v0, if_true);
@@ -3964,10 +4164,11 @@
     Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(v0, if_false);
-    __ GetObjectType(v0, a1, v0);  // Leave map in a1.
-    Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
-        if_true, if_false, fall_through);
-
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
+    Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
+          if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(v0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3986,18 +4187,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -4005,9 +4195,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4015,20 +4208,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(t0, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
       break;
@@ -4037,7 +4223,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
       Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
       break;
@@ -4050,36 +4236,26 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = eq;
-          __ mov(a0, result_register());
-          __ pop(a1);
           break;
         case Token::LT:
           cc = lt;
-          __ mov(a0, result_register());
-          __ pop(a1);
           break;
         case Token::GT:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = lt;
-          __ mov(a1, result_register());
-          __ pop(a0);
+          cc = gt;
           break;
         case Token::LTE:
-          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = ge;
-          __ mov(a1, result_register());
-          __ pop(a0);
+          cc = le;
           break;
         case Token::GTE:
           cc = ge;
-          __ mov(a0, result_register());
-          __ pop(a1);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
+      __ mov(a0, result_register());
+      __ pop(a1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4095,7 +4271,7 @@
       Handle<Code> ic = CompareIC::GetUninitialized(op);
       __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
     }
   }
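
Dropping the operand swaps above is sound because both operands are already evaluated in source order before the comparison is emitted (left to the stack, right to the accumulator), so ECMA-262's conversion order never depended on the condition code; the compare IC can simply take gt/le directly. The new direct mapping, restated (a sketch, not V8's CompareIC):

    #include <cassert>

    enum Condition { kEq, kLt, kGt, kLe, kGe };
    enum Token { EQ, LT, GT, LTE, GTE };

    // Direct token-to-condition mapping: no operand reversal needed.
    Condition TokenToCondition(Token op) {
      switch (op) {
        case EQ:  return kEq;
        case LT:  return kLt;
        case GT:  return kGt;  // formerly: swap operands, use lt
        case LTE: return kLe;  // formerly: swap operands, use ge
        case GTE: return kGe;
      }
      return kEq;  // unreachable
    }

    int main() {
      assert(TokenToCondition(GT) == kGt);
      assert(TokenToCondition(LTE) == kLe);
      return 0;
    }
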
@@ -4106,8 +4282,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4115,18 +4292,23 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
   __ mov(a0, result_register());
-  __ LoadRoot(a1, Heap::kNullValueRootIndex);
-  if (expr->is_strict()) {
+  __ LoadRoot(a1, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(a1, other_nil_value);
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ And(at, a0, Operand(kSmiTagMask));
-    __ Branch(if_false, eq, at, Operand(zero_reg));
+    __ JumpIfSmi(a0, if_false);
     // It can be an undetectable object.
     __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
     __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
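
The EmitLiteralCompareNil path above generalizes the removed CompareToNull visitor: under strict equality only the exact nil root matches, while under loose equality null, undefined, and undetectable objects all compare equal to either nil. The JavaScript semantics being compiled, restated as a plain predicate (the value kinds are illustrative stand-ins for V8's heap roots):

    #include <cassert>

    enum NilValue { kNullValue, kUndefinedValue };
    enum ValueKind { kNull, kUndefined, kUndetectable, kOrdinaryObject };

    // Strict (===): only the exact nil root matches.
    // Loose (==): null, undefined, and undetectable objects all match.
    bool CompareNil(ValueKind v, NilValue nil, bool strict) {
      ValueKind exact = (nil == kNullValue) ? kNull : kUndefined;
      if (strict) return v == exact;
      return v == kNull || v == kUndefined || v == kUndetectable;
    }

    int main() {
      assert(CompareNil(kUndefined, kNullValue, false));   // x == null
      assert(!CompareNil(kUndefined, kNullValue, true));   // x === null
      assert(CompareNil(kUndetectable, kUndefinedValue, false));
      assert(!CompareNil(kOrdinaryObject, kNullValue, false));
      return 0;
    }
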
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index a76c215..b057695 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -210,7 +210,8 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1);
+  __ RecordWrite(
+      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
 }
 
 
@@ -383,10 +384,10 @@
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- a1    : receiver
   //  -- a2    : name
@@ -396,7 +397,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
@@ -462,7 +463,7 @@
 }
 
 
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -485,10 +486,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -504,29 +505,29 @@
   // Get the receiver of the function from the stack.
   __ lw(a3, MemOperand(sp, argc*kPointerSize));
 
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ Push(a3, a2);
+    // Push the receiver and the name of the function.
+    __ Push(a3, a2);
 
-  // Call the entry.
-  __ li(a0, Operand(2));
-  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+    // Call the entry.
+    __ li(a0, Operand(2));
+    __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+    CEntryStub stub(1);
+    __ CallStub(&stub);
 
-  // Move result to a1 and leave the internal frame.
-  __ mov(a1, v0);
-  __ LeaveInternalFrame();
+    // Move result to a1 and leave the internal frame.
+    __ mov(a1, v0);
+  }
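
The EnterInternalFrame/LeaveInternalFrame pairs become block-scoped FrameScope objects here, tying frame teardown to scope exit instead of a manually matched call. The RAII shape in isolation (a sketch; V8's FrameScope also records the frame type for safepoints):

    #include <cstdio>

    // Minimal RAII stand-in for FrameScope(masm, StackFrame::INTERNAL).
    class FrameScope {
     public:
      FrameScope()  { puts("enter internal frame"); }
      ~FrameScope() { puts("leave internal frame"); }
    };

    int main() {
      {
        FrameScope scope;  // was: __ EnterInternalFrame();
        puts("push receiver and name, call the IC miss entry");
      }                    // was: __ LeaveInternalFrame();
      puts("frame torn down; continue");
      return 0;
    }
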
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
   if (id == IC::kCallIC_Miss) {
     Label invoke, global;
     __ lw(a2, MemOperand(sp, argc * kPointerSize));
-    __ andi(t0, a2, kSmiTagMask);
-    __ Branch(&invoke, eq, t0, Operand(zero_reg));
+    __ JumpIfSmi(a2, &invoke);
     __ GetObjectType(a2, a3, a3);
     __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
     __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -538,7 +539,7 @@
     __ bind(&invoke);
   }
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -550,18 +551,6 @@
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
                                  Code::ExtraICState extra_ic_state) {
@@ -577,27 +566,6 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- a2    : name
@@ -649,12 +617,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
-  __ EnterInternalFrame();
-  __ push(a2);  // Save the key.
-  __ Push(a1, a2);  // Pass the receiver and the key.
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(a2);  // Restore the key.
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a2);  // Save the key.
+    __ Push(a1, a2);  // Pass the receiver and the key.
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(a2);  // Restore the key.
+  }
   __ mov(a1, v0);
   __ jmp(&do_call);
 
@@ -713,7 +682,7 @@
   __ JumpIfSmi(a2, &miss);
   __ IsObjectJSStringType(a2, a0, &miss);
 
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
@@ -902,9 +871,9 @@
   MemOperand mapped_location =
       GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
   __ sw(a0, mapped_location);
-  // Verify mapped_location MemOperand is register, with no offset.
-  ASSERT_EQ(mapped_location.offset(), 0);
-  __ RecordWrite(a3, mapped_location.rm(), t5);
+  __ Addu(t2, a3, t1);
+  __ mov(t5, a0);
+  __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&notin);
@@ -912,8 +881,9 @@
   MemOperand unmapped_location =
       GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
   __ sw(a0, unmapped_location);
-  ASSERT_EQ(unmapped_location.offset(), 0);
-  __ RecordWrite(a3, unmapped_location.rm(), t5);
+  __ Addu(t2, a3, t0);
+  __ mov(t5, a0);
+  __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&slow);
@@ -1150,14 +1120,12 @@
 
   Register receiver = a1;
   Register index = a0;
-  Register scratch1 = a2;
-  Register scratch2 = a3;
+  Register scratch = a3;
   Register result = v0;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1201,109 +1169,144 @@
   //  -- a2     : receiver
   //  -- ra     : return address
   // -----------------------------------
-
-  Label slow, fast, array, extra, exit;
+  Label slow, array, extra, check_if_double_array;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
 
   // Register usage.
   Register value = a0;
   Register key = a1;
   Register receiver = a2;
   Register elements = a3;  // Elements array of the receiver.
-  // t0 is used as ip in the arm version.
-  // t3-t4 are used as temporaries.
+  Register elements_map = t2;
+  Register receiver_map = t3;
+  // t0 and t1 are used as general scratch registers.
 
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &slow);
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, &slow);
-
   // Get the map of the object.
-  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
   __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
   __ Branch(&slow, ne, t0, Operand(zero_reg));
   // Check if the object is a JS array or not.
-  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
-
-  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
   // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
-  __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
-  __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
 
   // Object case: Check key against length in the elements array.
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&slow, ne, t3, Operand(t0));
   // Check array bounds. Both the key and the length of FixedArray are smis.
   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast, lo, key, Operand(t0));
-  // Fall thru to slow if un-tagged index >= length.
+  __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
 
   // Slow case, handle jump to runtime.
   __ bind(&slow);
-
   // Entry registers are intact.
   // a0: value.
   // a1: key.
   // a2: receiver.
-
   GenerateRuntimeSetProperty(masm, strict_mode);
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
-
   __ bind(&extra);
+  // Registers still hold the key and the array length from the check above.
   // Only support writing to array[array.length].
   __ Branch(&slow, ne, key, Operand(t0));
   // Check for room in the elements backing store.
   // Both the key and the length of FixedArray are smis.
   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ Branch(&slow, hs, key, Operand(t0));
+  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Branch(&check_if_double_array, ne, elements_map,
+      Operand(masm->isolate()->factory()->fixed_array_map()));
   // Calculate key + 1 as smi.
-  STATIC_ASSERT(0 == kSmiTag);
-  __ Addu(t3, key, Operand(Smi::FromInt(1)));
-  __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&fast);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Addu(t0, key, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&fast_object_without_map_check);
 
+  __ bind(&check_if_double_array);
+  __ Branch(&slow, ne, elements_map,
+      Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  // Add 1 to key, and go to common element store code for doubles.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Addu(t0, key, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
   // is the length is always a smi.
-
   __ bind(&array);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&slow, ne, t3, Operand(t0));
 
   // Check the key against the length in the array.
   __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ Branch(&extra, hs, key, Operand(t0));
   // Fall through to fast case.
 
-  __ bind(&fast);
-  // Fast case, store the value to the elements backing store.
-  __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t4, t4, Operand(t1));
-  __ sw(value, MemOperand(t4));
-  // Skip write barrier if the written value is a smi.
-  __ JumpIfSmi(value, &exit);
+  __ bind(&fast_object_with_map_check);
+  Register scratch_value = t0;
+  Register address = t1;
+  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Branch(&fast_double_with_map_check, ne, elements_map,
+      Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch_value);
+  __ sw(value, MemOperand(address));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value);
 
+  __ bind(&non_smi_value);
+  // Escape to slow case when writing non-smi into smi-only array.
+  __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+  // Fast elements array, store the value to the elements backing store.
+  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch_value);
+  __ sw(value, MemOperand(address));
   // Update write barrier for the elements array address.
-  __ Subu(t3, t4, Operand(elements));
-
-  __ RecordWrite(elements, Operand(t3), t4, t5);
-  __ bind(&exit);
-
-  __ mov(v0, a0);  // Return the value written.
+  __ mov(v0, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 value,
+                 kRAHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
   __ Ret();
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ Branch(&slow, ne, elements_map,
+      Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value,
+                                 key,
+                                 receiver,
+                                 elements,
+                                 t0,
+                                 t1,
+                                 t2,
+                                 t3,
+                                 &slow);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value);
 }
 
 
@@ -1382,6 +1385,47 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a2     : receiver
+  //  -- a3     : target map
+  //  -- ra     : return address
+  // -----------------------------------
+  // Must return the modified receiver in v0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a2);
+    __ bind(&fail);
+  }
+
+  __ push(a2);
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a2     : receiver
+  //  -- a3     : target map
+  //  -- ra     : return address
+  // -----------------------------------
+  // Must return the modified receiver in v0.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a2);
+    __ bind(&fail);
+  }
+
+  __ push(a2);
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
@@ -1521,11 +1565,9 @@
     case Token::LT:
       return lt;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return lt;
+      return gt;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return ge;
+      return le;
     case Token::GTE:
       return ge;
     default:
@@ -1572,7 +1614,8 @@
   // If the instruction following the call is not an andi at, rx, #yyy, nothing
   // was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
-  if (!Assembler::IsAndImmediate(instr)) {
+  if (!(Assembler::IsAndImmediate(instr) &&
+        Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
     return;
   }
 
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
new file mode 100644
index 0000000..848cf3e
--- /dev/null
+++ b/src/mips/lithium-codegen-mips.cc
@@ -0,0 +1,4643 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-codegen-mips.h"
+#include "mips/lithium-gap-resolver-mips.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void BeforeCall(int call_size) const { }
+
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope(FPU);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop_at");
+  }
+#endif
+
+  // a1: Callee's JS function.
+  // cp: Callee's context.
+  // fp: Caller's frame pointer.
+  // ra: Caller's pc.
+
+  // Strict mode functions and builtins need to replace the receiver
+  // with undefined when called as functions (without an explicit
+  // receiver object). t1 is zero for method calls and non-zero for
+  // function calls.
+  if (!info_->is_classic_mode() || info_->is_native()) {
+    Label ok;
+    __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ sw(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  __ Push(ra, fp, cp, a1);
+  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ li(a0, Operand(slots));
+      __ li(a2, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ push(a2);
+      __ Subu(a0, a0, 1);
+      __ Branch(&loop, ne, a0, Operand(zero_reg));
+    } else {
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  // Possibly allocate a local context.
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment(";;; Allocate local context");
+    // Argument to NewContext is the function, which is in a1.
+    __ push(a1);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in both v0 and cp.  It replaces the context
+    // passed to us.  It's saved on the stack and kept live in cp.
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sw(a0, target);
+        // Update the write barrier. This clobbers a3 and a0.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  EnsureSpaceForLazyDeopt();
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
+      code->Generate();
+      __ jmp(code->exit());
+    }
+  }
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // TODO(plind): it is not clear that this will be an advantage for MIPS.
+  // Skipping it for now. Raised issue #100 for this.
+  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
+  return false;
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    __ li(scratch, ToOperand(op));
+    return scratch;
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    __ lw(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                FloatRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+      __ mtc1(at, flt_scratch);
+      __ cvt_d_w(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort("unsupported double immediate");
+    } else if (r.IsTagged()) {
+      Abort("unsupported tagged immediate");
+    }
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    MemOperand mem_op = ToMemOperand(op);
+    __ ldc1(dbl_scratch, mem_op);
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      return Operand(static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort("ToOperand Unsupported double immediate.");
+    }
+    ASSERT(r.IsTagged());
+    return Operand(literal);
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort("ToOperand IsDoubleRegister unimplemented");
+    return Operand(0);
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  ASSERT(!op->IsRegister());
+  ASSERT(!op->IsDoubleRegister());
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
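+    // E.g. with 4-byte pointers, spill slot 0 is at fp - 12.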
+    return MemOperand(fp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
+    return MemOperand(fp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  ASSERT(op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, context,
+    // and the first word of the double in the fixed part of the frame.
+    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address and the first word of
+    // the double.
+    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (environment->spilled_registers() != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          environment->spilled_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(translation,
+                         environment->spilled_registers()[value->index()],
+                         environment->HasTaggedValueAt(i));
+      } else if (
+          value->IsDoubleRegister() &&
+          environment->spilled_double_registers()[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        AddToTranslation(
+            translation,
+            environment->spilled_double_registers()[value->index()],
+            false);
+      }
+    }
+
+    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+  }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed from the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = GetStackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc,
+                            LEnvironment* environment,
+                            Register src1,
+                            const Operand& src2) {
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
+
+  if (FLAG_deopt_every_n_times == 1 &&
+      info_->shared_info()->opt_count() == id) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    return;
+  }
+
+  if (FLAG_trap_on_deopt) {
+    Label skip;
+    if (cc != al) {
+      __ Branch(&skip, NegateCondition(cc), src1, src2);
+    }
+    __ stop("trap_on_deopt");
+    __ bind(&skip);
+  }
+
+  if (cc == al) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    // TODO(plind): The ARM port is a little different here, due to its
+    // deopt jump table, which is not used for MIPS yet.
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      factory()->NewDeoptimizationInputData(length, TENURED);
+
+  Handle<ByteArray> translations = translations_.CreateByteArray();
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  ASSERT(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register cp always contains a pointer to the context.
+    safepoint.DefinePointerRegister(cp);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(RelocInfo::kNoPosition);
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+    LPointerMap* pointers,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpConstructResult: {
+      RegExpConstructResultStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::NumberToString: {
+      NumberToStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringAdd: {
+      StringAddStub stub(NO_STRING_ADD_FLAGS);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::TranscendentalCache: {
+      __ lw(a0, MemOperand(sp, 0));
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Register scratch = scratch0();
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register result = ToRegister(instr->result());
+
+  Label done;
+
+  if (instr->hydrogen()->HasPowerOf2Divisor()) {
+    Register scratch = scratch0();
+    ASSERT(!left.is(scratch));
+    __ mov(scratch, left);
+    int32_t p2constant = HConstant::cast(
+        instr->hydrogen()->right())->Integer32Value();
+    ASSERT(p2constant != 0);
+    // Result always takes the sign of the dividend (left).
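+    // E.g. in JavaScript, -7 % 4 is -3 and 7 % -4 is 3.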
+    p2constant = abs(p2constant);
+
+    Label positive_dividend;
+    __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
+    __ subu(result, zero_reg, left);
+    __ And(result, result, p2constant - 1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    }
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ subu(result, zero_reg, result);
+    __ bind(&positive_dividend);
+    __ And(result, scratch, p2constant - 1);
+  } else {
+    // div runs in the background while we check for special cases.
+    Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+    __ div(left, right);
+
+    // Check for x % 0.
+    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+    }
+
+    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
+    __ mfhi(result);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register right = ToRegister(instr->InputAt(1));
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS div is asynchronous - it will run in the background while we
+  // check for special cases.
+  __ div(left, right);
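+  // div leaves the quotient in LO and the remainder in HI.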
+
+  // Check for x / 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
+    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (-kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
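+  // The quotient must be exact: deoptimize if the remainder read from HI is
+  // non-zero.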
+  __ mfhi(result);
+  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+  __ mflo(result);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->InputAt(0));
+  LOperand* right_op = instr->InputAt(1);
+
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+  if (right_op->IsConstantOperand() && !can_overflow) {
+    // Use optimized code for specific constants.
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant will be handled separately.
+      // If the constant is negative and left is zero, the result should be -0.
+      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        __ Subu(result, zero_reg, left);
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
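+        // Branchless absolute value: mask is 0 for a non-negative constant
+        // and -1 for a negative one, so e.g. constant == -6 yields
+        // (-6 + -1) ^ -1 == 6.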
+
+        if (IsPowerOf2(constant_abs) ||
+            IsPowerOf2(constant_abs - 1) ||
+            IsPowerOf2(constant_abs + 1)) {
+          if (IsPowerOf2(constant_abs)) {
+            int32_t shift = WhichPowerOf2(constant_abs);
+            __ sll(result, left, shift);
+          } else if (IsPowerOf2(constant_abs - 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs - 1);
+            __ sll(result, left, shift);
+            __ Addu(result, result, left);
+          } else if (IsPowerOf2(constant_abs + 1)) {
+            int32_t shift = WhichPowerOf2(constant_abs + 1);
+            __ sll(result, left, shift);
+            __ Subu(result, result, left);
+          }
+
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0)  {
+            __ Subu(result, zero_reg, result);
+          }
+
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ mul(result, left, at);
+        }
+    }
+
+  } else {
+    Register right = EmitLoadRegister(right_op, scratch);
+    if (bailout_on_minus_zero) {
+      __ Or(ToRegister(instr->TempAt(0)), left, right);
+    }
+
+    if (can_overflow) {
+      // hi:lo = left * right.
+      __ mult(left, right);
+      __ mfhi(scratch);
+      __ mflo(result);
+      __ sra(at, result, 31);
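+      // The multiplication overflowed iff the high word in scratch differs
+      // from the sign extension of the low word.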
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+    } else {
+      __ mul(result, left, right);
+    }
+
+    if (bailout_on_minus_zero) {
+      // Bail out if the result is supposed to be negative zero.
+      Label done;
+      __ Branch(&done, ne, result, Operand(zero_reg));
+      DeoptimizeIf(lt,
+                   instr->environment(),
+                   ToRegister(instr->TempAt(0)),
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->InputAt(0);
+  LOperand* right_op = instr->InputAt(1);
+  ASSERT(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot() || right_op->IsArgument()) {
+    right = Operand(EmitLoadRegister(right_op, at));
+  } else {
+    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      __ Xor(result, left, right);
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->InputAt(1);
+  Register left = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS, it is built into the variable
+    // shift instructions.
+    switch (instr->op()) {
+      case Token::SAR:
+        __ srav(result, left, ToRegister(right_op));
+        break;
+      case Token::SHR:
+        __ srlv(result, left, ToRegister(right_op));
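+        // A shift amount of zero leaves the input unchanged, so the sign bit
+        // may still be set; such a result cannot pass for a non-negative
+        // int32, hence the deopt below.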
+        if (instr->can_deopt()) {
+          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+        }
+        break;
+      case Token::SHL:
+        __ sllv(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sra(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srl(result, left, shift_count);
+        } else {
+          if (instr->can_deopt()) {
+            __ And(at, left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ sll(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot() || right->IsArgument()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() ||
+        right->IsArgument() ||
+        right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      ASSERT(right->IsRegister());
+      // Due to overflow check macros not supporting constant operands,
+      // handling the IsConstantOperand case was moved to prev if clause.
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  ASSERT(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register array = ToRegister(instr->InputAt(0));
+  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+  Register result = ToRegister(instr->result());
+  Register input = ToRegister(instr->InputAt(0));
+
+  // Load map into |result|.
+  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Load the map's "bit field 2" into |result|. We only need the first byte,
+  // but the following bit field extraction takes care of that anyway.
+  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->TempAt(0));
+  Label done;
+
+  // If the object is a smi return the object.
+  __ Move(result, input);
+  __ JumpIfSmi(input, &done);
+
+  // If the object is not a value type, return the object.
+  __ GetObjectType(input, map, map);
+  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
+  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ Nor(result, zero_reg, Operand(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+  __ push(input_reg);
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    __ stop("Unreachable code.");
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot() || right->IsArgument()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() ||
+        right->IsArgument() ||
+        right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      ASSERT(right->IsRegister());
+      // Due to overflow check macros not supporting constant operands,
+      // handling the IsConstantOperand case was moved to prev if clause.
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ add_d(result, left, right);
+      break;
+    case Token::SUB:
+      __ sub_d(result, left, right);
+      break;
+    case Token::MUL:
+      __ mul_d(result, left, right);
+      break;
+    case Token::DIV:
+      __ div_d(result, left, right);
+      break;
+    case Token::MOD: {
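+      // There is no FPU instruction for modulo, so call out to a C function.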
+      // Save a0-a3 on the stack.
+      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+      __ MultiPush(saved_regs);
+
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ SetCallCDoubleArguments(left, right);
+      __ CallCFunction(
+          ExternalReference::double_fp_operation(Token::MOD, isolate()),
+          0, 2);
+      // Move the result in the double result register.
+      __ GetCFunctionDoubleResult(result);
+
+      // Restore saved register.
+      __ MultiPop(saved_regs);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker
+  // instruction (andi zero_reg) never appears in normal code.
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block,
+                          Condition cc, Register src1, const Operand& src2) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
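+  // Emit as few branches as possible by falling through whenever a target
+  // block is the next one to be generated.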
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(right_block),
+              NegateCondition(cc), src1, src2);
+  } else if (right_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+  } else {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::EmitBranchF(int left_block, int right_block,
+                           Condition cc, FPURegister src1, FPURegister src2) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+               NegateCondition(cc), src1, src2);
+  } else if (right_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+  } else {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->InputAt(0));
+    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+  } else if (r.IsDouble()) {
+    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
+    // Test the double value. Zero and NaN are false.
+    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->InputAt(0));
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      __ LoadRoot(at, Heap::kTrueValueRootIndex);
+      EmitBranch(true_block, false_block, eq, reg, Operand(at));
+    } else if (type.IsSmi()) {
+      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ LoadRoot(at, Heap::kTrueValueRootIndex);
+        __ Branch(true_label, eq, reg, Operand(at));
+        __ LoadRoot(at, Heap::kFalseValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ LoadRoot(at, Heap::kNullValueRootIndex);
+        __ Branch(false_label, eq, reg, Operand(at));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ Branch(false_label, eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ And(at, reg, Operand(kSmiTagMask));
+        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ And(at, at, Operand(1 << Map::kIsUndetectable));
+          __ Branch(false_label, ne, at, Operand(zero_reg));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+        __ Branch(true_label, ne, at, Operand(zero_reg));
+        __ Branch(false_label);
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+        __ Branch(&not_heap_number, ne, map, Operand(at));
+        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
+        // Falls through if dbl_scratch == 0.
+        __ Branch(false_label);
+        __ bind(&not_heap_number);
+      }
+
+      // We've seen something for the first time -> deopt.
+      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    __ jmp(chunk_->GetAssemblyLabel(block));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
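+  // lo, hi, ls and hs are the unsigned counterparts of lt, gt, le and ge.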
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  Condition cond = TokenToCondition(instr->op(), false);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right as doubles and load the
+      // resulting flags into the normal status register.
+      FPURegister left_reg = ToDoubleRegister(left);
+      FPURegister right_reg = ToDoubleRegister(right);
+
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to false block label.
+      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
+                 left_reg, right_reg);
+
+      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
+    } else {
+      Register cmp_left;
+      Operand cmp_right = Operand(0);
+
+      if (right->IsConstantOperand()) {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
+      } else if (left->IsConstantOperand()) {
+        cmp_left = ToRegister(right);
+        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
+        // We transposed the operands. Reverse the condition.
+        cond = ReverseCondition(cond);
+      } else {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToRegister(right));
+      }
+
+      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
+    }
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  EmitBranch(true_block, false_block, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitBranch(true_block, false_block, eq, left,
+             Operand(instr->hydrogen()->right()));
+}
+
+
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->InputAt(0));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(at, nil_value);
+  if (instr->kind() == kStrictEquality) {
+    EmitBranch(true_block, false_block, eq, reg, Operand(at));
+  } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+    __ LoadRoot(at, other_nil_value);  // In the delay slot.
+    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
+    __ JumpIfSmi(reg, false_label);  // In the delay slot.
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
+    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
+  }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  __ JumpIfSmi(input, is_not_object);
+
+  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+  __ Branch(is_object, eq, input, Operand(temp2));
+
+  // Load map.
+  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+  // Load instance type and check that it is in object type range.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+  __ Branch(is_not_object,
+            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
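+  // temp2 now holds the instance type; the caller completes the upper-bound
+  // check against LAST_NONCALLABLE_SPEC_OBJECT_TYPE with the returned
+  // condition.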
+  return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = scratch0();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsObject(reg, temp1, temp2, false_label, true_label);
+
+  EmitBranch(true_block, false_block, true_cond, temp2,
+             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  __ GetObjectType(input, temp1, temp1);
+
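+  // temp1 now holds the instance type; the caller branches on it being below
+  // FIRST_NONSTRING_TYPE with the returned condition.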
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsString(reg, temp1, false_label);
+
+  EmitBranch(true_block, false_block, true_cond, temp1,
+             Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
+  __ And(at, input_reg, kSmiTagMask);
+  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
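+  // The compare IC leaves its result in v0; branch on it relative to zero.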
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->InputAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ JumpIfSmi(input, false_label);
+
+  __ GetObjectType(input, scratch, scratch);
+  EmitBranch(true_block,
+             false_block,
+             BranchCondition(instr->hydrogen()),
+             scratch,
+             Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(input);
+  }
+
+  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ lw(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
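+  // A zero result means the hash field holds a cached array index.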
+  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
+}
+
+
+// Branches to a label or falls through with the address of this instance's
+// class name returned in the temp register, available for comparison by the
+// caller. Trashes the temp registers, but not the input. Only input and
+// temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  __ JumpIfSmi(input, is_false);
+
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+    __ GetObjectType(input, temp, temp2);
+    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+  } else {
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ GetObjectType(input, temp, temp2);
+    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ GetObjectType(temp, temp2, temp2);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+
+  // End with the address of this class_name instance in the temp register.
+  // On MIPS, the caller must do the comparison with Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->TempAt(0));
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  int true_block = instr->true_block_id();
+  int false_block = instr->false_block_id();
+
+  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  Label true_label, done;
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
+  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(v0));
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
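+  // The stub returns zero in v0 when the object is an instance of the
+  // function.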
+  __ Branch(&true_label, eq, result, Operand(zero_reg));
+  __ li(result, Operand(factory()->false_value()));
+  __ Branch(&done);
+  __ bind(&true_label);
+  __ li(result, Operand(factory()->true_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+   public:
+    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+                                  LInstanceOfKnownGlobal* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+    }
+    virtual LInstruction* instr() { return instr_; }
+    Label* map_check() { return &map_check_; }
+
+   private:
+    LInstanceOfKnownGlobal* instr_;
+    Label map_check_;
+  };
+
+  DeferredInstanceOfKnownGlobal* deferred;
+  deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+  Label done, false_result;
+  Register object = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(object.is(a0));
+  ASSERT(result.is(v0));
+
+  // A smi is not an instance of anything.
+  __ JumpIfSmi(object, &false_result);
+
+  // This is the inlined call site instanceof cache. The two occurrences of
+  // the hole value will be patched to the last map/result pair generated by
+  // the instanceof stub.
+  Label cache_miss;
+  Register map = temp;
+  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  __ bind(deferred->map_check());  // Label for calculating code patching.
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch with
+  // the cached map.
+  Handle<JSGlobalPropertyCell> cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ li(at, Operand(Handle<Object>(cell)));
+  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+  __ Branch(&cache_miss, ne, map, Operand(at));
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch
+  // with true or false.
+  __ li(result, Operand(factory()->the_hole_value()), true);
+  __ Branch(&done);
+
+  // The inlined call site cache did not match. Check null and string before
+  // calling the deferred code.
+  __ bind(&cache_miss);
+  // Null is not an instance of anything.
+  __ LoadRoot(temp, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, object, Operand(temp));
+
+  // String values are not instances of anything.
+  Condition cc = __ IsObjectStringType(object, temp, temp);
+  __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+  // Go to the deferred code.
+  __ Branch(deferred->entry());
+
+  __ bind(&false_result);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+  // Here result holds either true or false. The deferred code also produces
+  // a true or false object.
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
+  Register result = ToRegister(instr->result());
+  ASSERT(result.is(v0));
+
+  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kArgsInRegisters);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kCallSiteInlineCheck);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kReturnTrueFalseObject);
+  InstanceofStub stub(flags);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+  // Get the temp register reserved by the instruction. This needs to be t0
+  // because its slot among the pushed safepoint registers is used to
+  // communicate the offset to the location of the map check.
+  Register temp = ToRegister(instr->TempAt(0));
+  ASSERT(temp.is(t0));
+  __ li(InstanceofStub::right(), Operand(instr->function()));
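+  // The stub patches the inlined result at the map-check site; the distance
+  // back to map_check is passed via temp's safepoint slot below.
+  // kAdditionalDelta presumably covers the fixed number of instructions
+  // emitted between this point and the call's return address.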
+  static const int kAdditionalDelta = 7;
+  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+  Label before_push_delta;
+  __ bind(&before_push_delta);
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ li(temp, Operand(delta * kPointerSize), true);
+    __ StoreToSafepointRegisterSlot(temp, temp);
+  }
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LEnvironment* env = instr->deoptimization_environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  // Put the result value into the result register slot and
+  // restore all registers.
+  __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+  Condition condition = ComputeCompareCondition(op);
+  // A minor optimization that relies on LoadRoot always emitting one
+  // instruction.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
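+  // The true value loads in the branch delay slot and thus executes on both
+  // paths; if the branch falls through, it is overwritten with false below.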
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in v0.
+    __ push(v0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
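+  // Drop the parameters and the receiver (the +1) from the caller's frame.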
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+  __ Addu(sp, sp, Operand(sp_delta));
+  __ Jump(ra);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+  Register result = ToRegister(instr->result());
+  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+  }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  __ li(a2, Operand(instr->name()));
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+                                             : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register value = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+
+  // Load the cell.
+  __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+  // If the cell we are storing to contains the hole, it could have been
+  // deleted from the property dictionary. In that case, we need to update
+  // the property details in the property dictionary to mark it as no longer
+  // deleted.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ lw(scratch2,
+          FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
+  }
+
+  // Store the value.
+  __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  MemOperand target = ContextOperand(context, instr->slot_index());
+  __ sw(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              kRAHasBeenSaved,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Register object = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+  }
+}
+
+
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
+  LookupResult lookup(isolate());
+  type->LookupInDescriptors(NULL, *name, &lookup);
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    }
+  } else {
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+  Register object = ToRegister(instr->object());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  int map_count = instr->hydrogen()->types()->length();
+  Handle<String> name = instr->hydrogen()->name();
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
+      Label next;
+      __ Branch(&next, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&next);
+    }
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ Branch(&generic, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&generic);
+      __ li(a2, Operand(name));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  // Name is always in a2.
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Check that the function really is a function. Load map into the
+  // result register.
+  __ GetObjectType(function, result, scratch);
+  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+  // Get the prototype or initial map from the function.
+  __ lw(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ GetObjectType(result, scratch, scratch);
+  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  __ Branch(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  __ bind(&non_instance);
+  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  Register result = ToRegister(instr->result());
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    Label done, fail;
+    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
+    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
+    __ Branch(&done, eq, scratch, Operand(at));
+    // |scratch| still contains the map of |result|, the elements object.
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+    __ Ext(scratch, scratch, Map::kElementsKindShift,
+           Map::kElementsKindBitCount);
+    __ Branch(&done, eq, scratch,
+              Operand(FAST_ELEMENTS));
+    __ Branch(&fail, lt, scratch,
+              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ Branch(&done, le, scratch,
+              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+    __ bind(&fail);
+    __ Abort("Check for fast or external elements failed.");
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadExternalArrayPointer(
+    LLoadExternalArrayPointer* instr) {
+  Register to_reg = ToRegister(instr->result());
+  Register from_reg  = ToRegister(instr->InputAt(0));
+  __ lw(to_reg, FieldMemOperand(from_reg,
+                                ExternalArray::kExternalPointerOffset));
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register length = ToRegister(instr->length());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
+  // Bail out if index is not a valid argument index. The unsigned check
+  // gets the negative check for free.
+
+  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
+  // as is done on ARM. It will save us an instruction.
+  DeoptimizeIf(ls, instr->environment(), length, Operand(index));
+
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; add one more.
+  __ subu(length, length, index);
+  __ Addu(length, length, Operand(1));
+  __ sll(length, length, kPointerSizeLog2);
+  __ Addu(at, arguments, Operand(length));
+  __ lw(result, MemOperand(at, 0));
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = EmitLoadRegister(instr->key(), scratch0());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Load the result.
+  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
+  __ addu(scratch, elements, scratch);
+  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+    LLoadKeyedFastDoubleElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  if (key_is_constant) {
+    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
+            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ sll(scratch, key, shift_size);
+    __ Addu(elements, elements, Operand(scratch));
+    __ Addu(elements, elements,
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
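+  // The hole is a NaN with a distinguished upper word; load that word (at
+  // offset sizeof(kHoleNanLower32)) and deoptimize if it matches the hole.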
+  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+
+  __ ldc1(result, MemOperand(elements));
+}
+
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+    LLoadKeyedSpecializedArrayElement* instr) {
+  Register external_pointer = ToRegister(instr->external_pointer());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    FPURegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ lwc1(result, MemOperand(scratch0()));
+      __ cvt_d_s(result, result);
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
+      __ ldc1(result, MemOperand(scratch0()));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    Register scratch = scratch0();
+    MemOperand mem_operand(zero_reg);
+    if (key_is_constant) {
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch, key, shift_size);
+      __ Addu(scratch, scratch, external_pointer);
+      mem_operand = MemOperand(scratch);
+    }
+    switch (elements_kind) {
+      case EXTERNAL_BYTE_ELEMENTS:
+        __ lb(result, mem_operand);
+        break;
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ lbu(result, mem_operand);
+        break;
+      case EXTERNAL_SHORT_ELEMENTS:
+        __ lh(result, mem_operand);
+        break;
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ lhu(result, mem_operand);
+        break;
+      case EXTERNAL_INT_ELEMENTS:
+        __ lw(result, mem_operand);
+        break;
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ lw(result, mem_operand);
+        // TODO(danno): we could be more clever here, perhaps having a special
+        // version of the stub that detects if the overflow case actually
+        // happens, and generate code that returns a double rather than int.
+        DeoptimizeIf(Ugreater_equal, instr->environment(),
+            result, Operand(0x80000000));
+        break;
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a1));
+  ASSERT(ToRegister(instr->key()).is(a0));
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register temp = scratch1();
+  Register result = ToRegister(instr->result());
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label done, adapted;
+  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
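+  // temp is zero iff the caller frame is an arguments adaptor frame.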
+
+  // Result is the frame pointer for the frame if not adapted and for the real
+  // frame below the adaptor frame if adapted.
+  __ movn(result, fp, temp);  // move only if temp is not equal to zero (ne)
+  __ movz(result, scratch, temp);  // move only if temp is equal to zero (eq)
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
+  __ Branch(&done, eq, fp, Operand(elem));
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result,
+        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  ASSERT(receiver.is(a0));  // Used for parameter count.
+  ASSERT(function.is(a1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, receiver_ok;
+
+  // Do not transform the receiver to object for strict mode
+  // functions.
+  __ lw(scratch,
+         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(scratch,
+         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+
+  // Do not transform the receiver to object for builtins.
+  int32_t strict_mode_function_mask =
+      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+  // Deoptimize if the receiver is not a JS object.
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+  __ GetObjectType(receiver, scratch, scratch);
+  DeoptimizeIf(lt, instr->environment(),
+               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ Branch(&receiver_ok);
+
+  __ bind(&global_object);
+  __ lw(receiver, GlobalObjectOperand());
+  __ lw(receiver,
+         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+  __ bind(&receiver_ok);
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ Move(receiver, length);
+  // The arguments are at a one pointer size offset from elements.
+  __ Addu(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
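+  // The shift runs in the branch delay slot, converting the remaining
+  // argument count to a byte offset on loop entry and on each iteration.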
+  __ bind(&loop);
+  __ Addu(scratch, elements, scratch);
+  __ lw(scratch, MemOperand(scratch));
+  __ push(scratch);
+  __ Subu(length, length, Operand(1));
+  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
+
+  __ bind(&invoke);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver, which is a0, as expected
+  // by InvokeFunction.
+  v8::internal::ParameterCount actual(receiver);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->InputAt(0);
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort("DoPushArgument not implemented for double type.");
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, at);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  LoadHeapObject(result, instr->hydrogen()->closure());
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, cp);
+}
+
+
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result,
+        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register global = ToRegister(instr->global());
+  Register result = ToRegister(instr->result());
+  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
+  // Change context if needed.
+  bool change_context =
+      (info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  }
+
+  // Set a0 to the arguments count if adaptation is not needed. Assumes that
+  // a0 is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ li(a0, Operand(arity));
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  __ SetCallKind(t1, call_kind);
+  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(at);
+
+  // Set up deoptimization.
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+  // Restore context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ mov(a0, v0);
+  __ li(a1, Operand(instr->function()));
+  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Move(result, input);
+  __ And(at, exponent, Operand(HeapNumber::kSignMask));
+  __ Branch(&done, eq, at, Operand(zero_reg));
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(a1) ? a0 : a1;
+    Register tmp2 = input.is(a2) ? a0 : a2;
+    Register tmp3 = input.is(a3) ? a0 : a3;
+    Register tmp4 = input.is(t0) ? a0 : t0;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ Branch(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(v0))
+      __ mov(tmp1, v0);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
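+    // Clear the sign bit in the high (exponent) word and copy both words
+    // into the freshly allocated heap number.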
+    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
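+  // The move executes in the branch delay slot, so result holds the input on
+  // both paths before the conditional negation below.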
+  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
+  __ subu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000.
+  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    FPURegister input = ToDoubleRegister(instr->InputAt(0));
+    FPURegister result = ToDoubleRegister(instr->result());
+    __ abs_d(result, input);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->InputAt(0));
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  FPURegister single_scratch = double_scratch0().low();
+  Register scratch1 = scratch0();
+  Register except_flag = ToRegister(instr->TempAt(0));
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     single_scratch,
+                     input,
+                     scratch1,
+                     except_flag);
+
+  // Deopt if the operation did not succeed.
+  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+  // Load the result.
+  __ mfc1(result, single_scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    Label done;
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ mfc1(scratch1, input.high());
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ mfc1(result, input.high());
+  __ Ext(scratch,
+         result,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  Label skip1;
+  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, zero_reg);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Branch(&check_sign_on_zero);
+  } else {
+    __ Branch(&done);
+  }
+  __ bind(&skip1);
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  DeoptimizeIf(ge, instr->environment(), scratch,
+               Operand(HeapNumber::kExponentBias + 32));
+
+  // Save the original sign for later comparison.
+  __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
+  __ Move(double_scratch0(), 0.5);
+  __ add_d(double_scratch0(), input, double_scratch0());
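+  // Rounding is implemented as a floor of input + 0.5, via the truncation
+  // toward minus infinity performed below.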
+
+  // Check the sign of the result: if the sign changed, the input value
+  // was in [-0.5, 0[ and the result should be -0.
+  __ mfc1(result, double_scratch0().high());
+  __ Xor(result, result, Operand(scratch));
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // ARM uses 'mi' here, which is 'lt'
+    DeoptimizeIf(lt, instr->environment(), result,
+                 Operand(zero_reg));
+  } else {
+    Label skip2;
+    // ARM uses 'mi' here, which is 'lt'
+    // Negating it results in 'ge'
+    __ Branch(&skip2, ge, result, Operand(zero_reg));
+    __ mov(result, zero_reg);
+    __ Branch(&done);
+    __ bind(&skip2);
+  }
+
+  Register except_flag = scratch;
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     double_scratch0().low(),
+                     double_scratch0(),
+                     result,
+                     except_flag);
+
+  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+  __ mfc1(result, double_scratch0().low());
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ bind(&check_sign_on_zero);
+    __ mfc1(scratch, input.high());
+    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch = double_scratch0();
+
+  // Add +0 to convert -0 to +0.
+  __ mtc1(zero_reg, double_scratch.low());
+  __ mtc1(zero_reg, double_scratch.high());
+  __ add_d(result, input, double_scratch);
+  __ sqrt_d(result, result);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  Register scratch = scratch0();
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  if (exponent_type.IsDouble()) {
+    // Prepare arguments and call C function.
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left),
+                               ToDoubleRegister(right));
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
+  } else if (exponent_type.IsInteger32()) {
+    ASSERT(ToRegister(right).is(a0));
+    // Prepare arguments and call C function.
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(isolate()), 1, 1);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+    Register right_reg = ToRegister(right);
+
+    // Check for smi on the right hand side.
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+
+    // Untag smi and convert it to a double.
+    __ SmiUntag(right_reg);
+    FPURegister single_scratch = double_scratch0();
+    __ mtc1(right_reg, single_scratch);
+    __ cvt_d_w(result_reg, single_scratch);
+    __ Branch(&call);
+
+    // Heap number map check.
+    __ bind(&non_smi);
+    __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+    __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
+
+    // Prepare arguments and call C function.
+    __ bind(&call);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
+  }
+  // Store the result in the result register.
+  __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathRound:
+      DoMathRound(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    case kMathPowHalf:
+      DoMathPowHalf(instr);
+      break;
+    case kMathCos:
+      DoMathCos(instr);
+      break;
+    case kMathSin:
+      DoMathSin(instr);
+      break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
+    case kMathLog:
+      DoMathLog(instr);
+      break;
+    default:
+      Abort("Unimplemented type of LUnaryMathOperation.");
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(a1));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+  __ li(a2, Operand(instr->name()));
+  CallCode(ic, mode, instr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(a1));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
+  __ li(a2, Operand(instr->name()));
+  CallCode(ic, mode, instr);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ li(a1, Operand(instr->target()));
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+  __ li(a0, Operand(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  int offset = instr->offset();
+
+  ASSERT(!object.is(value));
+
+  if (!instr->transition().is_null()) {
+    __ li(scratch, Operand(instr->transition()));
+    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  }
+
+  // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  if (instr->is_in_object()) {
+    __ sw(value, FieldMemOperand(object, offset));
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
+    }
+  } else {
+    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    __ sw(value, FieldMemOperand(scratch, offset));
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          kRAHasBeenSaved,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  // Name is always in a2.
+  __ li(a2, Operand(instr->name()));
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  DeoptimizeIf(hs,
+               instr->environment(),
+               ToRegister(instr->index()),
+               Operand(ToRegister(instr->length())));
+}
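+
+// Illustrative sketch of the bounds check above: 'hs' is an unsigned >=,
+// so a negative index reads as a large unsigned value and deoptimizes as
+// well, covering both ends of the range with a single comparison:
+//
+//   if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(length))
+//     Deoptimize();  // Hypothetical helper; stands in for DeoptimizeIf.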
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+  Register scratch = scratch0();
+
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    __ And(at, value, Operand(kSmiTagMask));
+    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+  }
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ sw(value, FieldMemOperand(elements, offset));
+  } else {
+    __ sll(scratch, key, kPointerSizeLog2);
+    __ addu(scratch, elements, scratch);
+    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute the address of the modified element and store it in the key
+    // register.
+    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kRAHasBeenSaved,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+    LStoreKeyedFastDoubleElement* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  Label not_nan;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
+            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ sll(scratch, key, shift_size);
+    __ Addu(scratch, elements, Operand(scratch));
+    __ Addu(scratch, scratch,
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  }
+
+  Label is_nan;
+  // Check for NaN. All NaNs must be canonicalized.
+  __ BranchF(NULL, &is_nan, eq, value, value);
+  __ Branch(&not_nan);
+
+  // Only load the canonical NaN if the self-comparison above was unordered,
+  // i.e. the value is a NaN.
+  __ bind(&is_nan);
+  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+  __ bind(&not_nan);
+  __ sdc1(value, MemOperand(scratch));
+}
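+
+// Illustrative sketch of the NaN canonicalization above (assumption: the
+// hole in a FixedDoubleArray is encoded as one reserved NaN bit pattern,
+// so arbitrary NaN payloads must not be stored as-is):
+//
+//   double v = ...;
+//   if (v != v) {           // Self-compare is unordered for NaN.
+//     v = canonical_nan();  // Hypothetical stand-in for
+//   }                       // canonical_not_the_hole_nan_as_double().
+//   *slot = v;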
+
+
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+    LStoreKeyedSpecializedArrayElement* instr) {
+  Register external_pointer = ToRegister(instr->external_pointer());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+
+  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    FPURegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      __ cvt_s_d(double_scratch0(), value);
+      __ swc1(double_scratch0(), MemOperand(scratch0()));
+    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+      __ sdc1(value, MemOperand(scratch0()));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand(zero_reg);
+    Register scratch = scratch0();
+    if (key_is_constant) {
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
+    } else {
+      __ sll(scratch, key, shift_size);
+      __ Addu(scratch, scratch, external_pointer);
+      mem_operand = MemOperand(scratch);
+    }
+    switch (elements_kind) {
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+        __ sb(value, mem_operand);
+        break;
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+        __ sh(value, mem_operand);
+        break;
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+        __ sw(value, mem_operand);
+        break;
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
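+
+// Addressing sketch for the external-array stores above (illustration):
+// the element address is external_pointer + (key << shift_size), where
+// shift_size is log2 of the element width -- 0 for byte elements, 1 for
+// shorts, 2 for ints and floats, 3 for doubles. For example:
+//
+//   int32_t* slot = reinterpret_cast<int32_t*>(base) + key;  // shift 2
+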
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(a2));
+  ASSERT(ToRegister(instr->key()).is(a1));
+  ASSERT(ToRegister(instr->value()).is(a0));
+
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  __ mov(ToRegister(instr->result()), object_reg);
+
+  Label not_applicable;
+  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+  __ li(new_map_reg, Operand(to_map));
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(a2));
+    ASSERT(new_map_reg.is(a3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(a2));
+    ASSERT(new_map_reg.is(a3));
+    __ mov(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  __ push(ToRegister(instr->left()));
+  __ push(ToRegister(instr->right()));
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt: public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new DeferredStringCharCodeAt(this, instr);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  if (FLAG_debug_code) {
+    __ AbortIfNotSmi(v0);
+  }
+  __ SmiUntag(v0);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode: public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new DeferredStringCharFromCode(this, instr);
+
+  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  ASSERT(!char_code.is(result));
+
+  __ Branch(deferred->entry(), hi,
+            char_code, Operand(String::kMaxAsciiCharCode));
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ sll(scratch, char_code, kPointerSizeLog2);
+  __ Addu(result, result, scratch);
+  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(deferred->entry(), eq, result, Operand(scratch));
+  __ bind(deferred->exit());
+}
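+
+// Cache-probe sketch for the fast path above (illustration): ASCII codes
+// index the single-character string cache directly; the undefined value
+// is the miss sentinel that routes to the deferred runtime call:
+//
+//   result = single_character_string_cache[char_code];  // char_code <= 0x7F
+//   if (result == undefined) result = %CharFromCode(char_code);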
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+  Register string = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  FPURegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ lw(scratch, ToMemOperand(input));
+    __ mtc1(scratch, single_scratch);
+  } else {
+    __ mtc1(ToRegister(input), single_scratch);
+  }
+  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+  Register overflow = scratch0();
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  __ SmiTagCheckOverflow(reg, overflow);
+  __ BranchOnOverflow(deferred->entry(), overflow);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->InputAt(0));
+  FPURegister dbl_scratch = double_scratch0();
+
+  // Preserve the value of all registers.
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  Label done;
+  __ SmiUntag(reg);
+  __ Xor(reg, reg, Operand(0x80000000));
+  __ mtc1(reg, dbl_scratch);
+  __ cvt_d_w(dbl_scratch, dbl_scratch);
+  if (FLAG_inline_new) {
+    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+    if (!reg.is(t1)) __ mov(reg, t1);
+    __ Branch(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ StoreToSafepointRegisterSlot(zero_reg, reg);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  if (!reg.is(v0)) __ mov(reg, v0);
+
+  // Done. Store the value in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+  __ StoreToSafepointRegisterSlot(reg, reg);
+}
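+
+// Worked example of the overflow recovery above (assumes 32-bit smis with
+// a one-bit tag; illustration only):
+//
+//   int32_t original = 0x40000000;             // Bits 30 and 31 disagree.
+//   int32_t tagged   = original << 1;          // SmiTag wraps to 0x80000000.
+//   int32_t untagged = tagged >> 1;            // 0xC0000000: sign flipped.
+//   int32_t restored = untagged ^ 0x80000000;  // Original value recovered.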
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ Branch(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, zero_reg);
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch won't be zero.
+    __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
+    __ SmiUntag(ToRegister(input));
+    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+  } else {
+    __ SmiUntag(ToRegister(input));
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                DoubleRegister result_reg,
+                                bool deoptimize_on_undefined,
+                                LEnvironment* env) {
+  Register scratch = scratch0();
+
+  Label load_smi, heap_number, done;
+
+  // Smi check.
+  __ JumpIfSmi(input_reg, &load_smi);
+
+  // Heap number map check.
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  if (deoptimize_on_undefined) {
+    DeoptimizeIf(ne, env, scratch, Operand(at));
+  } else {
+    Label heap_number;
+    __ Branch(&heap_number, eq, scratch, Operand(at));
+
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(ne, env, input_reg, Operand(at));
+
+    // Convert undefined to NaN.
+    __ LoadRoot(at, Heap::kNanValueRootIndex);
+    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+    __ Branch(&done);
+
+    __ bind(&heap_number);
+  }
+  // Heap number to double register conversion.
+  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ Branch(&done);
+
+  // Smi to double register conversion.
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ mtc1(input_reg, result_reg);
+  __ cvt_d_w(result_reg, result_reg);
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->InputAt(0));
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DoubleRegister double_scratch = double_scratch0();
+  FPURegister single_scratch = double_scratch.low();
+
+  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input is a tagged HeapObject.
+  // Heap number map check.
+  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // The heap number map in 'at' and the input's map in scratch1 are used by
+  // the checks in both clauses of the if below.
+
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+    ASSERT(!scratch3.is(input_reg) &&
+           !scratch3.is(scratch1) &&
+           !scratch3.is(scratch2));
+    // Performs a truncating conversion of a floating-point number, as used
+    // by the JS bitwise operations.
+    Label heap_number;
+    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+    ASSERT(ToRegister(instr->result()).is(input_reg));
+    __ mov(input_reg, zero_reg);
+    __ Branch(&done);
+
+    __ bind(&heap_number);
+    __ ldc1(double_scratch2,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    __ EmitECMATruncate(input_reg,
+                        double_scratch2,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+
+    // Load the double value.
+    __ ldc1(double_scratch,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed.
+    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+    // Load the result.
+    __ mfc1(input_reg, single_scratch);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+      __ mfc1(scratch1, double_scratch.high());
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Let the deferred code handle the HeapObject case.
+  __ JumpIfNotSmi(input_reg, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg,
+                   instr->hydrogen()->deoptimize_on_undefined(),
+                   instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
+  DoubleRegister double_scratch = double_scratch0();
+  FPURegister single_scratch = double_scratch0().low();
+
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    __ EmitECMATruncate(result_reg,
+                        double_input,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    Register except_flag = scratch2;
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       single_scratch,
+                       double_input,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+    // Load the result.
+    __ mfc1(result_reg, single_scratch);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ And(at, ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ And(at, ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
+
+  __ GetObjectType(input, scratch, scratch);
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+    } else {
+      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+      // Omit the upper-bound check when last is the last possible type.
+      if (last != LAST_TYPE) {
+        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ And(at, scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
+          at, Operand(zero_reg));
+    } else {
+      __ And(scratch, scratch, Operand(mask));
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+    }
+  }
+}
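+
+// Sketch of the mask/tag fast path above (illustration): when the mask has
+// a single bit set, (type & mask) is either 0 or mask, so testing against
+// zero is enough and no explicit compare with the tag is needed:
+//
+//   uint8_t t = instance_type & mask;  // mask == 1 << k for some k
+//   // tag == 0    => deoptimize when t != 0
+//   // tag == mask => deoptimize when t == 0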
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->InputAt(0)->IsRegister());
+  Register reg = ToRegister(instr->InputAt(0));
+  DeoptimizeIf(ne, instr->environment(), reg,
+               Operand(instr->hydrogen()->target()));
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  Register scratch = scratch0();
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+  DeoptimizeIf(ne,
+               instr->environment(),
+               scratch,
+               Operand(instr->hydrogen()->map()));
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number.
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIf(ne, instr->environment(), input_reg,
+               Operand(factory()->undefined_value()));
+  __ mov(result_reg, zero_reg);
+  __ jmp(&done);
+
+  // Heap number case.
+  __ bind(&heap_number);
+  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  // Smi case.
+  __ bind(&is_smi);
+  __ SmiUntag(scratch, input_reg);
+  __ ClampUint8(result_reg, scratch);
+
+  __ bind(&done);
+}
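+
+// Behavior sketch for the tagged clamp above (illustration):
+//
+//   clamp(smi n)         == n clamped to [0, 255]
+//   clamp(heap number d) == d rounded and clamped to [0, 255]
+//   clamp(undefined)     == 0
+//   anything else deoptimizes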
+
+
+void LCodeGen::LoadHeapObject(Register result,
+                              Handle<HeapObject> object) {
+  if (heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        factory()->NewJSGlobalPropertyCell(object);
+    __ li(result, Operand(cell));
+    __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ li(result, Operand(object));
+  }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Register temp1 = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<JSObject> current_prototype = instr->prototype();
+
+  // Load prototype object.
+  LoadHeapObject(temp1, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+    DeoptimizeIf(ne,
+                 instr->environment(),
+                 temp2,
+                 Operand(Handle<Map>(current_prototype->map())));
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    LoadHeapObject(temp1, current_prototype);
+  }
+
+  // Check the holder map.
+  __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+  DeoptimizeIf(ne,
+               instr->environment(),
+               temp2,
+               Operand(Handle<Map>(current_prototype->map())));
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(a1, Operand(constant_elements));
+  __ Push(a3, a2, a1);
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        constant_elements_kind == FAST_DOUBLE_ELEMENTS
+        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(a2));
+  ASSERT(!result.is(a2));
+
+  // Increase the offset so that subsequent objects end up right after
+  // this one.
+  int current_offset = *offset;
+  int size = object->map()->instance_size();
+  *offset += size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  ASSERT(object->elements()->length() == 0 ||
+         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    __ lw(a2, FieldMemOperand(source, i));
+    __ sw(a2, FieldMemOperand(result, current_offset + i));
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ Addu(a2, result, Operand(*offset));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+      LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    } else {
+      __ li(a2, Operand(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    }
+  }
+}
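+
+// Worked size example for the copy above (hypothetical numbers): for a
+// boilerplate with instance_size == 24 and two in-object properties on a
+// 32-bit target (kPointerSize == 4), header_size == 24 - 2 * 4 == 16, so
+// four header words are copied verbatim before the two property slots are
+// visited and possibly deep-copied.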
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ push(a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+  ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
+  __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(a2, Operand(constant_properties));
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  __ li(a1, Operand(Smi::FromInt(flags)));
+  __ Push(t0, a3, a2, a1);
+
+  // Pick the right runtime function to call.
+  int properties_count = constant_properties->length() / 2;
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+  ASSERT(ToRegister(instr->result()).is(v0));
+  __ push(a0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  Label materialized;
+  // Registers will be used as follows:
+  // a3 = JS function.
+  // t3 = literals array.
+  // a1 = regexp literal.
+  // a0 = regexp literal clone.
+  // a2 and t0-t2 are used as temporaries.
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  int literal_offset = FixedArray::kHeaderSize +
+      instr->hydrogen()->literal_index() * kPointerSize;
+  __ lw(a1, FieldMemOperand(t3, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, a1, Operand(at));
+
+  // Create the regexp literal using the runtime function.
+  // Result will be in v0.
+  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(t1, Operand(instr->hydrogen()->pattern()));
+  __ li(t0, Operand(instr->hydrogen()->flags()));
+  __ Push(t3, t2, t1, t0);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(a1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ Push(a1, a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(a1);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (Unroll copy loop once for better throughput).
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ lw(a3, FieldMemOperand(a1, i));
+    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
+    __ sw(a3, FieldMemOperand(v0, i));
+    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
+    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
+  }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (!pretenure && shared_info->num_literals() == 0) {
+    FastNewClosureStub stub(shared_info->language_mode());
+    __ li(a1, Operand(shared_info));
+    __ push(a1);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ li(a2, Operand(shared_info));
+    __ li(a1, Operand(pretenure
+                       ? factory()->true_value()
+                       : factory()->false_value()));
+    __ Push(cp, a2, a1);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  ASSERT(ToRegister(instr->result()).is(v0));
+  Register input = ToRegister(instr->InputAt(0));
+  __ push(input);
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Register cmp1 = no_reg;
+  Operand cmp2 = Operand(no_reg);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal(),
+                                                  cmp1,
+                                                  cmp2);
+
+  ASSERT(cmp1.is_valid());
+  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name,
+                                 Register& cmp1,
+                                 Operand& cmp2) {
+  // This function makes heavy use of branch delay slots: they load values
+  // that are always safe to compute, whatever the type of the input
+  // register turns out to be.
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  if (type_name->Equals(heap()->number_symbol())) {
+    __ JumpIfSmi(input, true_label);
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    cmp1 = input;
+    cmp2 = Operand(at);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->string_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+    // input is an object, so it is safe to load the map's bit field even if
+    // we take the other branch.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->boolean_symbol())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    cmp1 = at;
+    cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    cmp1 = at;
+    cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->undefined_symbol())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And - it is safe in the delay
+    // slot.
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = ne;
+
+  } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, scratch, input);
+    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+    cmp1 = input;
+    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(heap()->object_symbol())) {
+    __ JumpIfSmi(input, false_label);
+    if (!FLAG_harmony_typeof) {
+      __ LoadRoot(at, Heap::kNullValueRootIndex);
+      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    }
+    // input is an object, so it is safe to use GetObjectType in the delay
+    // slot.
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Still an object, so the InstanceType can be loaded.
+    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+    __ Branch(USE_DELAY_SLOT, false_label,
+              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Still an object, so the BitField can be loaded.
+    // Check for undetectable objects => false.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else {
+    cmp1 = at;
+    cmp2 = Operand(zero_reg);  // Valid operands so the caller's asserts pass.
+    __ Branch(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+  Register temp1 = ToRegister(instr->TempAt(0));
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  EmitIsConstructCall(temp1, scratch0());
+
+  EmitBranch(true_block, false_block, eq, temp1,
+             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+  ASSERT(!temp1.is(temp2));
+  // Get the frame pointer for the calling frame.
+  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne, temp2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  int patch_size = Deoptimizer::patch_size();
+  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= Assembler::kInstrSize;
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
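+
+// Worked padding example (hypothetical numbers): with
+// Assembler::kInstrSize == 4 and Deoptimizer::patch_size() == 12, a site
+// reached only 8 bytes past the previous lazy-deopt PC still needs
+// 12 - 8 == 4 bytes of headroom, i.e. one nop, before it can be patched.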
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  EnsureSpaceForLazyDeopt();
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  Register object = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  Register strict = scratch0();
+  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
+  __ Push(object, key, strict);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  Register obj = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  __ Push(key, obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStackCheck* instr_;
+  };
+
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&done, hs, sp, Operand(at));
+    StackCheckStub stub;
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    EnsureSpaceForLazyDeopt();
+    __ bind(&done);
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  } else {
+    ASSERT(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new DeferredStackCheck(this, instr);
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    EnsureSpaceForLazyDeopt();
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when the call and its safepoint are
+    // emitted in the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 2aec684..32d4fb3 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -29,35 +29,412 @@
 #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
 
 #include "mips/lithium-mips.h"
-
+#include "mips/lithium-gap-resolver-mips.h"
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
 
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
-
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LDeferredCode;
+class SafepointGenerator;
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deopt_jump_table_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(info->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1),
+        last_lazy_deopt_pc_(0),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  // Simple accessors.
+  MacroAssembler* masm() const { return masm_; }
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const { return info_->isolate(); }
+  Factory* factory() const { return isolate()->factory(); }
+  Heap* heap() const { return isolate()->heap(); }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        FloatRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+  int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
 
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
-  bool GenerateCode() {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool GenerateCode();
 
   // Finish the code by setting stack height, safepoint, and bailout
   // information on it.
-  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+  void FinishCode(Handle<Code> code);
+
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+  }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+
+  Register scratch0() { return lithiumScratchReg; }
+  Register scratch1() { return lithiumScratchReg2; }
+  DoubleRegister double_scratch0() { return lithiumScratchDouble; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateDeoptJumpTable();
+  bool GenerateSafepointTable();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in a1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr,
+                         CallKind call_kind);
+
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition cc,
+                    LEnvironment* environment,
+                    Register src1,
+                    const Operand& src2);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
+  void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
+  void DoMathCos(LUnaryMathOperation* instr);
+  void DoMathSin(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+                                              int arguments,
+                                              Safepoint::DeoptMode mode);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+  void EmitBranch(int left_block,
+                  int right_block,
+                  Condition cc,
+                  Register src1,
+                  const Operand& src2);
+  void EmitBranchF(int left_block,
+                   int right_block,
+                   Condition cc,
+                   FPURegister src1,
+                   FPURegister src2);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input,
+                        DoubleRegister result,
+                        bool deoptimize_on_undefined,
+                        LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  // Returns two registers in cmp1 and cmp2 that can be used in the
+  // Branch instruction after EmitTypeofIs.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name,
+                         Register& cmp1,
+                         Operand& cmp2);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
+  // Emits optimized code for %_IsConstructCall().
+  // Caller should branch on equal condition.
+  void EmitIsConstructCall(Register temp1, Register temp2);
+
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
+  struct JumpTableEntry {
+    explicit inline JumpTableEntry(Address entry)
+        : label(),
+          address(entry) { }
+    Label label;
+    Address address;
+  };
+
+  void EnsureSpaceForLazyDeopt();
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> deopt_jump_table_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+  int last_lazy_deopt_pc_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    PushSafepointRegistersScope(LCodeGen* codegen,
+                                Safepoint::Kind kind)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = kind;
+
+      switch (codegen_->expected_safepoint_kind_) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PushSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PushSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    ~PushSafepointRegistersScope() {
+      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+      ASSERT((kind & Safepoint::kWithRegisters) != 0);
+      switch (kind) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PopSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PopSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
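+// Illustrative usage of PushSafepointRegistersScope (a hypothetical deferred
+// call site, sketched for clarity): the scope brackets a runtime call so
+// that registers are saved across it and can be recorded in the safepoint.
+//
+//   {
+//     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+//     CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
+//   }
+//
+// The constructor pushes the safepoint registers and the destructor pops
+// them when the scope ends.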
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label *exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
 };
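+// Illustrative sketch of a typical LDeferredCode subclass (hypothetical
+// instruction name; the real subclasses live in lithium-codegen-mips.cc):
+//
+//   class DeferredExample: public LDeferredCode {
+//    public:
+//     DeferredExample(LCodeGen* codegen, LExample* instr)
+//         : LDeferredCode(codegen), instr_(instr) { }
+//     virtual void Generate() { codegen()->DoDeferredExample(instr_); }
+//     virtual LInstruction* instr() { return instr_; }
+//    private:
+//     LExample* instr_;
+//   };
+//
+// The main code path branches to entry() for the slow case and execution
+// resumes at exit() when the deferred code is done.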
 
 } }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
new file mode 100644
index 0000000..8f7f89c
--- /dev/null
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -0,0 +1,309 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "mips/lithium-gap-resolver-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = lithiumScratchReg;
+static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  ASSERT(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      ASSERT(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
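+// Illustrative trace (operand names assumed): resolving the parallel move
+//   { v0 -> v1, v1 -> v0, const(42) -> a0 }
+// defers the constant move, detects the v0/v1 cycle when the depth-first
+// walk reaches the root move again, saves the value of v1 in
+// kSavedValueRegister via BreakCycle, emits mov v1, v0, restores the saved
+// value to v0 via RestoreValue, and only then emits the constant move
+// li a0, 42.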
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // When doing a depth-first traversal of moves, a cycle can only be found
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+  // the stack, to free up the register.
+  ASSERT(!moves_[index].IsPending());
+  ASSERT(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    ASSERT(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index_].  After performing all moves in the tree rooted
+  // in that move, we restore the value to that source.
+  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  ASSERT(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  ASSERT(in_cycle_);
+  ASSERT(saved_destination_ != NULL);
+
+  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+             kSavedDoubleValueRegister);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ sdc1(kSavedDoubleValueRegister,
+            cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      __ sw(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ lw(cgen_->ToRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        if (!destination_operand.OffsetIsInt16Encodable()) {
+          // 'at' is overwritten while saving the value to the destination.
+          // Therefore we can't use 'at'.  It is OK if the read from the
+          // source clobbers 'at', since that happens before 'at' is needed
+          // to address the destination.
+          // This uses only a single reg of the double reg-pair.
+          __ lwc1(kSavedDoubleValueRegister, source_operand);
+          __ swc1(kSavedDoubleValueRegister, destination_operand);
+        } else {
+          __ lw(at, source_operand);
+          __ sw(at, destination_operand);
+        }
+      } else {
+        __ lw(kSavedValueRegister, source_operand);
+        __ sw(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    Operand source_operand = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      __ li(cgen_->ToRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ li(kSavedValueRegister, source_operand);
+      __ sw(kSavedValueRegister, destination_operand);
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ sdc1(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kSavedDoubleValueRegister was used to break the cycle,
+        // but kSavedValueRegister is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lw(kSavedValueRegister, source_operand);
+        __ sw(kSavedValueRegister, destination_operand);
+        __ lw(kSavedValueRegister, source_high_operand);
+        __ sw(kSavedValueRegister, destination_high_operand);
+      } else {
+        __ ldc1(kSavedDoubleValueRegister, source_operand);
+        __ sdc1(kSavedDoubleValueRegister, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.h b/src/mips/lithium-gap-resolver-mips.h
new file mode 100644
index 0000000..2506e38
--- /dev/null
+++ b/src/mips/lithium-gap-resolver-mips.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
new file mode 100644
index 0000000..19892fc
--- /dev/null
+++ b/src/mips/lithium-mips.cc
@@ -0,0 +1,2243 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  ASSERT(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < inputs_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    inputs_[i]->PrintTo(stream);
+  }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  for (int i = 0; i < results_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    results_[i]->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
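+  // A gap holds up to four parallel moves, one per inner position
+  // (BEFORE, START, END, AFTER); the gap is redundant only if every
+  // present move is redundant.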
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::SHL: return "sll-t";
+    case Token::SAR: return "sra-t";
+    case Token::SHR: return "srl-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  InputAt(0)->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  InputAt(1)->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+  stream->Add("/%s ", hydrogen()->OpName());
+  InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  InputAt(0)->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  InputAt(1)->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+  stream->Add("[a2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+  SmartArrayPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+  SmartArrayPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+    : spill_slot_count_(0),
+      info_(info),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // Skip a slot when allocating a double-width slot.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
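+// For example, starting from an empty chunk: GetNextSpillIndex(false)
+// returns 0, and a following GetNextSpillIndex(true) skips index 1 and
+// returns 2, so the double-width value effectively occupies the pair of
+// single-width slots 1 and 2.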
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LInstructionGap* gap = new LInstructionGap(block);
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
+  int result = index - info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
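+// For example, with two parameters the receiver (index 0) maps to -3, the
+// first parameter (index 1) to -2 and the second (index 2) to -1, all of
+// them negative and therefore distinct from spill slot indices.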
+
+// A parameter relative to fp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
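+// For example, with two parameters and kPointerSize == 4 (MIPS32): the
+// receiver (index -1) is at offset (1 + 2 + 1) * 4 == 16, the first
+// parameter (index 0) at 12 and the second (index 1) at 8.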
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(info(), graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartArrayPointer<char> name(
+        info()->shared_info()->DebugName()->ToCString());
+    PrintF("Aborting LChunk building in @\"%s\": ", *name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+                                    LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateInstruction<1, I, T>* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateInstruction<1, I, T>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instruction_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instruction_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasObservableSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+  instr->MarkAsSaveDoubles();
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), a1);
+    LOperand* right = UseFixed(instr->right(), a0);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->left());
+
+  HValue* right_value = instr->right();
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseRegisterAtStart(right_value);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
+        break;
+      }
+    }
+  }
+
+  LInstruction* result =
+      DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
+}
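+// For example, in JavaScript (-1 >>> 0) evaluates to 4294967295, which does
+// not fit in a signed 32-bit integer; if some use of the shift result cannot
+// truncate to int32, LShiftI must be able to deoptimize.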
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  ASSERT(op != Token::MOD);
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, a1);
+  LOperand* right_operand = UseFixed(right, a0);
+  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
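+        // A successor with a higher block id is compiled after this block
+        // and still needs the predecessor's environment, so work on a copy.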
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    instr->set_hydrogen_value(current);
+    chunk_->AddInstruction(instr, current_block_);
+  }
+  current_instruction_ = old_current;
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
+  if (hydrogen_env == NULL) return NULL;
+
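+  // Environments of inlined function frames are chained; build the outer
+  // frames first so this frame can link to them.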
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument((*argument_index_accumulator)++);
+    } else {
+      op = UseAny(value);
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new LGoto(instr->FirstSuccessor()->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  if (v->EmitAtUses()) {
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
+  }
+  return AssignEnvironment(new LBranch(UseRegister(v)));
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstanceOf* result =
+      new LInstanceOf(UseFixed(instr->left(), a0),
+                      UseFixed(instr->right(), a1));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  LInstanceOfKnownGlobal* result =
+      new LInstanceOfKnownGlobal(UseFixed(instr->left(), a0), FixedTemp(t0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* receiver = UseFixed(instr->receiver(), a0);
+  LOperand* length = UseFixed(instr->length(), a2);
+  LOperand* elements = UseFixed(instr->elements(), a3);
+  LApplyArguments* result = new LApplyArguments(function,
+                                                receiver,
+                                                length,
+                                                elements);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = Use(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalObject(context));
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  LOperand* global_object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LGlobalReceiver(global_object));
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  BuiltinFunctionId op = instr->op();
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
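+    // These operations are implemented as calls, so the input and the
+    // result both live in the fixed double register f4.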
+    LOperand* input = UseFixedDouble(instr->value(), f4);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+    return MarkAsCall(DefineFixedDouble(result, f4), instr);
+  } else {
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+    switch (op) {
+      case kMathAbs:
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      case kMathFloor:
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      case kMathSqrt:
+        return DefineAsRegister(result);
+      case kMathRound:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathPowHalf:
+        return DefineAsRegister(result);
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  LOperand* key = UseFixed(instr->key(), a2);
+  return MarkAsCall(DefineFixed(new LCallKeyed(key), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  argument_count_ -= instr->argument_count();
+  LCallNew* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction(function), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), a1);
+    LOperand* right = UseFixed(instr->right(), a0);
+    LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // TODO(1042): The fixed register allocation is needed because we call
+    // TypeRecordingBinaryOpStub from the generated code, which requires
+    // registers a0 and a1 to be used. We should remove that when we
+    // provide a native implementation.
+    LOperand* dividend = UseFixed(instr->left(), a0);
+    LOperand* divisor = UseFixed(instr->right(), a1);
+    return AssignEnvironment(AssignPointerMap(
+             DefineFixed(new LDivI(dividend, divisor), v0)));
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LModI* mod;
+    if (instr->HasPowerOf2Divisor()) {
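+      // A modulus by a power of two reduces to a bitwise AND (plus a sign
+      // fix-up), so no general divide or extra temps are needed.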
+      ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+      LOperand* value = UseRegisterAtStart(instr->left());
+      mod = new LModI(value, UseOrConstant(instr->right()));
+    } else {
+      LOperand* dividend = UseRegister(instr->left());
+      LOperand* divisor = UseRegister(instr->right());
+      mod = new LModI(dividend,
+                      divisor,
+                      TempRegister(),
+                      FixedTemp(f20),
+                      FixedTemp(f22));
+    }
+
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      return AssignEnvironment(DefineAsRegister(mod));
+    } else {
+      return DefineAsRegister(mod);
+    }
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left;
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
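+    // Detecting a -0 result needs a scratch register whenever the multiply
+    // can overflow or the right operand is not a known constant.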
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+        (instr->CheckFlag(HValue::kCanOverflow) ||
+        !right->IsConstantOperand())) {
+      left = UseRegister(instr->LeastConstantOperand());
+      temp = TempRegister();
+    } else {
+      left = UseRegisterAtStart(instr->LeastConstantOperand());
+    }
+    return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), f2);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), f4) :
+      UseFixed(instr->right(), a0);
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, f6),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LCmpT* result = new LCmpT(left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+    HCompareIDAndBranch* instr) {
+  Representation r = instr->GetInputRepresentation();
+  if (r.IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
+  } else {
+    ASSERT(r.IsDouble());
+    ASSERT(instr->left()->representation().IsDouble());
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+    HCompareConstantEqAndBranch* instr) {
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LJSArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
+    HFixedArrayBaseLength* instr) {
+  LOperand* array = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+  LOperand* object = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LElementsKind(object));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LValueOf* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            UseRegister(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), a0);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LNumberUntagD* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
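+      // A value statically known to be a smi can be untagged with a plain
+      // shift; otherwise it may be a heap number and needs the full
+      // tagged-to-int32 conversion, which can deoptimize.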
+      LInstruction* res = NULL;
+      if (!needs_check) {
+        res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      } else {
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+                                                      : NULL;
+        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+                                                      : NULL;
+        res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
+        res = AssignEnvironment(res);
+      }
+      return res;
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      // Make sure that the temp and result_temp registers are
+      // different.
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
+      Define(result, result_temp);
+      return AssignPointerMap(result);
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      LDoubleToI* res =
+        new LDoubleToI(value,
+                       TempRegister(),
+                       instr->CanTruncateToInt32() ? TempRegister() : NULL);
+      return AssignEnvironment(DefineAsRegister(res));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LNumberTagI* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      LOperand* value = Use(instr->value());
+      return DefineAsRegister(new LInteger32ToDouble(value));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    // Revisit this decision here and in the tagged case below.
+    return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(f22)));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve f22 explicitly.
+    LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(f22));
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+    return AssignEnvironment(DefineAsRegister(res));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LOperand* temp3 = FixedTemp(f22);
+    LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+    return AssignEnvironment(DefineSameAsFirst(res));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), v0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    return DefineAsRegister(new LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new LConstantD);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), a0);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LOperand* temp = TempRegister();
+  LOperand* value = UseTempRegister(instr->value());
+  LInstruction* result = new LStoreGlobalCell(value, temp);
+  if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), a1);
+  LOperand* value = UseFixed(instr->value(), a0);
+  LStoreGlobalGeneric* result =
+      new LStoreGlobalGeneric(global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadContextSlot(context));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
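+    // The write barrier may clobber the registers it is given, so hand it
+    // operands in temp registers that are dead afterwards.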
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  return new LStoreContextSlot(context, value);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+    HLoadNamedFieldPolymorphic* instr) {
+  ASSERT(instr->representation().IsTagged());
+  if (instr->need_generic()) {
+    LOperand* obj = UseFixed(instr->object(), a0);
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  } else {
+    LOperand* obj = UseRegisterAtStart(instr->object());
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), a0);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+    HLoadExternalArrayPointer* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LLoadExternalArrayPointer(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  ASSERT(instr->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+    HLoadKeyedFastDoubleElement* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* elements = UseTempRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LLoadKeyedFastDoubleElement* result =
+      new LLoadKeyedFastDoubleElement(elements, key);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+    HLoadKeyedSpecializedArrayElement* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  Representation representation(instr->representation());
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LLoadKeyedSpecializedArrayElement* result =
+      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+  LInstruction* load_instr = DefineAsRegister(result);
+  // A load from an external unsigned int array may produce a value outside
+  // the int32 range and cause a deopt; make sure it has an environment.
+  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+      AssignEnvironment(load_instr) : load_instr;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), a1);
+  LOperand* key = UseFixed(instr->key(), a0);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+    HStoreKeyedFastDoubleElement* instr) {
+  ASSERT(instr->value()->representation().IsDouble());
+  ASSERT(instr->elements()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LOperand* val = UseTempRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+    HStoreKeyedSpecializedArrayElement* instr) {
+  Representation representation(instr->value()->representation());
+  ElementsKind elements_kind = instr->elements_kind();
+  ASSERT(
+      (representation.IsInteger32() &&
+       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+      (representation.IsDouble() &&
+       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+  ASSERT(instr->external_pointer()->representation().IsExternal());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  bool val_is_temp_register =
+      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+  LOperand* val = val_is_temp_register
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+
+  return new LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                key,
+                                                val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), a2);
+  LOperand* key = UseFixed(instr->key(), a1);
+  LOperand* val = UseFixed(instr->value(), a0);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
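+    // Going from smi-only to generic fast elements only rewrites the map
+    // word; the elements themselves are already valid, so no call is needed.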
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, NULL);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), a0);
+    LOperand* fixed_object_reg = FixedTemp(a2);
+    LOperand* new_map_reg = FixedTemp(a3);
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, v0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  return new LStoreNamedField(obj, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), a1);
+  LOperand* val = UseFixed(instr->value(), a0);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LStringCharFromCode* result = new LStringCharFromCode(char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+  LOperand* string = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LStringLength(string));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+    HObjectLiteralGeneric* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LOperand* object = UseFixed(instr->object(), a0);
+  LOperand* key = UseFixed(instr->key(), a1);
+  LDeleteProperty* result = new LDeleteProperty(object, key);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  if (spill_index > LUnallocated::kMaxFixedIndex) {
+    Abort("Too many spill slots needed for OSR");
+    spill_index = 0;
+  }
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object: arguments.length and
+  // element access are supported directly on stack arguments, and any use
+  // that would materialize the arguments object causes a bailout. So this
+  // value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = UseRegister(instr->index());
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LToFastProperties* result = new LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LTypeof* result = new LTypeof(UseFixed(instr->value(), a0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  // If there is an instruction pending a deoptimization environment, create
+  // a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instruction_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
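+    // A function-entry stack check is an ordinary call; a back-edge check
+    // may need to deoptimize or capture state, so it carries an
+    // environment and a pointer map.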
+    return MarkAsCall(new LStackCheck, instr);
+  } else {
+    ASSERT(instr->is_backwards_branch());
+    return AssignEnvironment(AssignPointerMap(new LStackCheck));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->call_kind());
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index ebc1e43..eb85f10 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -32,131 +32,303 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
-
-// Note: this file was taken from the X64 version. ARM has a partially working
-// lithium implementation, but for now it is not ported to mips.
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LCodeGen;
-class LEnvironment;
-class Translation;
+
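+// X-macro instruction lists: a client defines V(type) and instantiates a
+// list below to stamp out code (opcodes, predicates, casts) for every
+// instruction kind without repeating the set by hand.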
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(ControlInstruction)                         \
+  V(Call)                                       \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckNonSmi)                                \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
+  V(ClassOfTestAndBranch)                       \
+  V(CmpConstantEqAndBranch)                     \
+  V(CmpIDAndBranch)                             \
+  V(CmpObjectEqAndBranch)                       \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(Context)                                    \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(ElementsKind)                               \
+  V(FixedArrayBaseLength)                       \
+  V(FunctionLiteral)                            \
+  V(GetCachedArrayIndex)                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
+  V(InstanceOf)                                 \
+  V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
+  V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
+  V(IsConstructCallAndBranch)                   \
+  V(IsNilAndBranch)                             \
+  V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
+  V(IsSmiAndBranch)                             \
+  V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
+  V(JSArrayLength)                              \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadContextSlot)                            \
+  V(LoadElements)                               \
+  V(LoadExternalArrayPointer)                   \
+  V(LoadFunctionPrototype)                      \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
+  V(LoadKeyedFastDoubleElement)                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadKeyedSpecializedArrayElement)           \
+  V(LoadNamedField)                             \
+  V(LoadNamedFieldPolymorphic)                  \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteralFast)                          \
+  V(ObjectLiteralGeneric)                       \
+  V(OsrEntry)                                   \
+  V(OuterContext)                               \
+  V(Parameter)                                  \
+  V(Power)                                      \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreContextSlot)                           \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
+  V(StoreKeyedFastDoubleElement)                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreKeyedSpecializedArrayElement)          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
+  V(StringCharCodeAt)                           \
+  V(StringCharFromCode)                         \
+  V(StringLength)                               \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(ThisFunction)                               \
+  V(Throw)                                      \
+  V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
+  V(Typeof)                                     \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
+  }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
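+// DECLARE_CONCRETE_INSTRUCTION gives each leaf class its opcode, its code
+// generation entry point, a printable mnemonic, and a checked downcast;
+// DECLARE_HYDROGEN_ACCESSOR adds a typed accessor for the hydrogen
+// instruction the lithium instruction was built from.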
 
 class LInstruction: public ZoneObject {
  public:
-  LInstruction() { }
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        is_call_(false),
+        is_save_doubles_(false) { }
   virtual ~LInstruction() { }
 
-  // Predicates should be generated by macro as in lithium-ia32.h.
-  virtual bool IsLabel() const {
-    UNIMPLEMENTED();
-    return false;
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
   }
-  virtual bool IsOsrEntry() const {
-    UNIMPLEMENTED();
-    return false;
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
   }
 
-  LPointerMap* pointer_map() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool HasPointerMap() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
-
-  LEnvironment* environment() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool HasEnvironment() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
-
-  virtual bool IsControl() const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  void MarkAsCall() { UNIMPLEMENTED(); }
-  void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+  void MarkAsCall() { is_call_ = true; }
+  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  bool IsMarkedAsCall() const { return is_call_; }
+  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
-  bool IsMarkedAsSaveDoubles() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() = 0;
 
-  virtual bool HasResult() const {
-    UNIMPLEMENTED();
-    return false;
-  }
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
 
-  virtual LOperand* result() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  virtual int InputCount() {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  virtual LOperand* InputAt(int i) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  virtual int TempCount() {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  virtual LOperand* TempAt(int i) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LOperand* FirstInput() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LOperand* Output() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
 
 #ifdef DEBUG
-  void VerifyCall() { UNIMPLEMENTED(); }
+  void VerifyCall();
 #endif
+
+ private:
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+  bool is_call_;
+  bool is_save_doubles_;
 };
 
 
-class LGap: public LInstruction {
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
  public:
-  explicit LGap(HBasicBlock* block) { }
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const { return R != 0; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() { return results_[0]; }
 
-  HBasicBlock* block() const {
-    UNIMPLEMENTED();
-    return NULL;
+  int InputCount() { return I; }
+  LOperand* InputAt(int i) { return inputs_[i]; }
+
+  int TempCount() { return T; }
+  LOperand* TempAt(int i) { return temps_[i]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+};
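+
+// For example, LAddI below is an LTemplateInstruction<1, 2, 0>: one result,
+// two inputs and no temps, so HasResult() returns true, InputCount() is 2 and
+// TempCount() is 0, all fixed at compile time.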
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
   }
 
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of
+  // subclasses.
+  virtual bool IsGap() const { return true; }
+  virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
   enum InnerPosition {
     BEFORE,
     START,
@@ -166,141 +338,1927 @@
     LAST_INNER_POSITION = AFTER
   };
 
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
-    UNIMPLEMENTED();
-    return NULL;
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
   }
 
   LParallelMove* GetParallelMove(InnerPosition pos)  {
-    UNIMPLEMENTED();
-    return NULL;
+    return parallel_moves_[pos];
   }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
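+
+// Note that GetOrCreateParallelMove above allocates each LParallelMove
+// lazily, so a gap whose four inner positions are never populated by the
+// register allocator carries no moves; IsRedundant() (defined in the .cc
+// file) presumably reports exactly that case.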
+
+
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(int block_id) : block_id_(block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream);
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+
+ private:
+  int block_id_;
+};
+
+
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
 };
 
 
 class LLabel: public LGap {
  public:
-  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
 };
 
 
-class LOsrEntry: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
  public:
-  // Function could be generated by a macro as in lithium-ia32.h.
-  static LOsrEntry* cast(LInstruction* instr) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
 
-  LOperand** SpilledRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-  LOperand** SpilledDoubleRegisterArray() {
-    UNIMPLEMENTED();
-    return NULL;
-  }
 
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
-    UNIMPLEMENTED();
-  }
-  void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand) {
-    UNIMPLEMENTED();
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
   }
 };
 
 
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+  virtual bool IsControl() const { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+};
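+
+// Each *AndBranch instruction below derives from LControlInstruction, so the
+// code generator can query true_block_id() and false_block_id() to find the
+// hydrogen successor blocks it must branch to.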
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LTemplateInstruction<1, 2, 3> {
+ public:
+  // Used when the right-hand operand is a constant power of 2 (e.g. x % 8).
+  LModI(LOperand* left,
+        LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = NULL;
+    temps_[1] = NULL;
+    temps_[2] = NULL;
+  }
+
+  // Used for the standard case.
+  LModI(LOperand* left,
+        LOperand* right,
+        LOperand* temp1,
+        LOperand* temp2,
+        LOperand* temp3) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDivI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpIDAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->GetInputRepresentation().IsDouble();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LUnaryMathOperation(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream);
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+                               "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpConstantEqAndBranch(LOperand* left) {
+    inputs_[0] = left;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+                               "cmp-constant-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
+};
+
+
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsNilAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCmpT(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+};
+
+
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantD: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value() const { return hydrogen()->handle(); }
+};
+
+
+class LBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return hydrogen()->map(); }
+  int true_block_id() const {
+    return hydrogen()->FirstSuccessor()->block_id();
+  }
+  int false_block_id() const {
+    return hydrogen()->SecondSuccessor()->block_id();
+  }
+};
+
+
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LJSArrayLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
+
+class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFixedArrayBaseLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
+                               "fixed-array-base-length")
+  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
+};
+
+
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LElementsKind(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LValueOf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+};
+
+
+class LThrow: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LThrow(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LBitNotI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LReturn(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
+                               "load-named-field-polymorphic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+  LOperand* object() { return inputs_[0]; }
+};
+
+
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+  LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadElements(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadExternalArrayPointer(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+                               "load-external-array-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+                               "load-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                    LOperand* key) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+                               "load-keyed-specialized-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreGlobalGeneric(LOperand* global_object, LOperand* value) {
+    inputs_[0] = global_object;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* global_object() { return InputAt(0); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(1); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* value() { return InputAt(1); }
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LOuterContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalObject(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+
+  LOperand* context() { return InputAt(0); }
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGlobalReceiver(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+  LOperand* global() { return InputAt(0); }
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> function() { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallKeyed(LOperand* key) {
+    inputs_[0] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  LOperand* function() { return inputs_[0]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallNew(LOperand* constructor) {
+    inputs_[0] = constructor;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberTagI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp1,
+             LOperand* temp2,
+             LOperand* temp3) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamedField(LOperand* obj, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool is_in_object() { return hydrogen()->is_in_object(); }
+  int offset() { return hydrogen()->offset(); }
+  Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedFastDoubleElement(LOperand* elements,
+                               LOperand* key,
+                               LOperand* val) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+                               "store-keyed-fast-double-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                     LOperand* key,
+                                     LOperand* val) {
+    inputs_[0] = external_pointer;
+    inputs_[1] = key;
+    inputs_[2] = val;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+                               "store-keyed-specialized-array-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+  LOperand* external_pointer() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharCodeAt(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringCharFromCode(LOperand* char_code) {
+    inputs_[0] = char_code;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+  LOperand* char_code() { return inputs_[0]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LStringLength(LOperand* string) {
+    inputs_[0] = string;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+  DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+  LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckFunction(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMap(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+  LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+  Handle<JSObject> holder() const { return hydrogen()->holder(); }
+};
+
+
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+};
+
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+};
+
+
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LTypeof(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+  explicit LIsConstructCallAndBranch(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
+class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(HGraph* graph) { }
+  explicit LChunk(CompilationInfo* info, HGraph* graph);
 
-  HGraph* graph() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
 
-  const ZoneList<LPointerMap*>* pointer_maps() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
 
-  LOperand* GetNextSpillSlot(bool double_slot) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  LConstantOperand* DefineConstantOperand(HConstant* constant) {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
   LLabel* GetLabel(int block_id) const {
-    UNIMPLEMENTED();
-    return NULL;
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
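+  // Resolve a block id through the chain of label replacements (set up when
+  // empty blocks are eliminated, see MarkEmptyBlocks) to the block that is
+  // actually emitted.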
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
   }
 
-  const ZoneList<LInstruction*>* instructions() const {
-    UNIMPLEMENTED();
-    return NULL;
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
   }
 
-  int GetParameterStackSlot(int index) const {
-    UNIMPLEMENTED();
-    return 0;
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
   }
 
-  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
-
-  LGap* GetGapAt(int index) const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-  bool IsGapAt(int index) const {
-    UNIMPLEMENTED();
-    return false;
-  }
-
-  int NearestGapPos(int index) const {
-    UNIMPLEMENTED();
-    return 0;
-  }
-
-  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
-
-  CompilationInfo* info() const {
-    UNIMPLEMENTED();
-    return NULL;
-  }
-
-#ifdef DEBUG
-  void Verify() { UNIMPLEMENTED(); }
-#endif
+ private:
+  int spill_slot_count_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
 };
 
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        info_(info),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instruction_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
 
   // Build the sequence for the graph.
-  LChunk* Build() {
-    UNIMPLEMENTED();
-    return NULL;
-  };
+  LChunk* Build();
 
   // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
-    UNIMPLEMENTED(); \
-    return NULL; \
-  }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO
 
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not
+  // reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
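+
+  // Illustrative only (a sketch, not from the source): a typical binary-op
+  // lowering that lets the allocator reuse the first input's register for
+  // the result could look like
+  //
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+  //   return DefineSameAsFirst(new LAddI(left, right));
+  //
+  // since operands created AtStart need only be live at instruction start.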
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, a stack slot, or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a register, a stack slot, or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+                           LUnallocated* result);
+  template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+                                    int index);
+  template<int I, int T>
+      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
+      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+                                Register reg);
+  template<int I, int T>
+      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+                                      DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instruction_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
   DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
 };
 
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 1c0af5d..a6dcddc 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -42,7 +42,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -80,46 +81,15 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register address,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, ne, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Calculate page address: Clear bits from 0 to kPageSizeBits.
-  if (mips32r2) {
-    Ins(object, zero_reg, 0, kPageSizeBits);
-  } else {
-    // The Ins macro is slow on r1, so use shifts instead.
-    srl(object, object, kPageSizeBits);
-    sll(object, object, kPageSizeBits);
-  }
-
-  // Calculate region number.
-  Ext(address, address, Page::kRegionSizeLog2,
-      kPageSizeBits - Page::kRegionSizeLog2);
-
-  // Mark region dirty.
-  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-  li(at, Operand(1));
-  sllv(at, at, address);
-  or_(scratch, scratch, at);
-  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
   // Safepoints expect a block of kNumSafepointRegisters values on the
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   ASSERT(num_unsaved >= 0);
-  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
   MultiPush(kSafepointSavedRegisters);
 }
 
@@ -127,7 +97,9 @@
 void MacroAssembler::PopSafepointRegisters() {
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   MultiPop(kSafepointSavedRegisters);
-  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
 }
 
 
@@ -180,6 +152,7 @@
 
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  UNIMPLEMENTED_MIPS();
   // General purpose registers are pushed last on the stack.
   int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -187,8 +160,6 @@
 }
 
 
-
-
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
@@ -200,38 +171,53 @@
 }
 
 
-// Will clobber 4 registers: object, scratch0, scratch1, at. The
-// register 'object' contains a heap object pointer.  The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
-                                 Operand offset,
-                                 Register scratch0,
-                                 Register scratch1) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    RAStatus ra_status,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
+  ASSERT(!AreAliased(value, dst, t8, object));
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch0, eq, &done);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
 
-  // Add offset into the object.
-  Addu(scratch0, object, offset);
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
 
-  // Record the actual write.
-  RecordWriteHelper(object, scratch0, scratch1);
+  Addu(dst, object, Operand(offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+    Branch(&ok, eq, t8, Operand(zero_reg));
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object,
+              dst,
+              value,
+              ra_status,
+              save_fp,
+              remembered_set_action,
+              OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(object, Operand(BitCast<int32_t>(kZapValue)));
-    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -241,29 +227,96 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register scratch) {
+                                 Register value,
+                                 RAStatus ra_status,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
+  ASSERT(!AreAliased(object, address, value, t8));
+  ASSERT(!AreAliased(object, address, value, t9));
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+  ASSERT(!address.is(cp) && !value.is(cp));
 
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch, eq, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    JumpIfSmi(value, &done);
+  }
+
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                eq,
+                &done);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                eq,
+                &done);
 
   // Record the actual write.
-  RecordWriteHelper(object, address, scratch);
+  if (ra_status == kRAHasNotBeenSaved) {
+    push(ra);
+  }
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
+  if (ra_status == kRAHasNotBeenSaved) {
+    pop(ra);
+  }
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(object, Operand(BitCast<int32_t>(kZapValue)));
-    li(address, Operand(BitCast<int32_t>(kZapValue)));
-    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+  }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address,
+                                         Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
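+  // In rough pseudo-C, this helper does (a sketch, not the emitted code):
+  //   *store_buffer_top++ = address;
+  //   if ((uintptr_t) store_buffer_top & kStoreBufferOverflowBit)
+  //     <call StoreBufferOverflowStub(fp_mode)>;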
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  li(t8, Operand(store_buffer));
+  lw(scratch, MemOperand(t8));
+  // Store pointer to buffer and increment buffer top.
+  sw(address, MemOperand(scratch));
+  Addu(scratch, scratch, kPointerSize);
+  // Write back new top of buffer.
+  sw(scratch, MemOperand(t8));
+  // Check whether the end of the buffer has been reached; the overflow stub
+  // is called below if it has.
+  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kFallThroughAtEnd) {
+    Branch(&done, eq, t8, Operand(zero_reg));
+  } else {
+    ASSERT(and_then == kReturnAtEnd);
+    Ret(eq, t8, Operand(zero_reg));
+  }
+  push(ra);
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(ra);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
   }
 }
 
@@ -343,44 +396,6 @@
 }
 
 
-void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
-  // First of all we assign the hash seed to scratch.
-  LoadRoot(scratch, Heap::kHashSeedRootIndex);
-  SmiUntag(scratch);
-
-  // Xor original key with a seed.
-  xor_(reg0, reg0, scratch);
-
-  // Compute the hash code from the untagged key.  This must be kept in sync
-  // with ComputeIntegerHash in utils.h.
-  //
-  // hash = ~hash + (hash << 15);
-  nor(scratch, reg0, zero_reg);
-  sll(at, reg0, 15);
-  addu(reg0, scratch, at);
-
-  // hash = hash ^ (hash >> 12);
-  srl(at, reg0, 12);
-  xor_(reg0, reg0, at);
-
-  // hash = hash + (hash << 2);
-  sll(at, reg0, 2);
-  addu(reg0, reg0, at);
-
-  // hash = hash ^ (hash >> 4);
-  srl(at, reg0, 4);
-  xor_(reg0, reg0, at);
-
-  // hash = hash * 2057;
-  li(scratch, Operand(2057));
-  mul(reg0, reg0, scratch);
-
-  // hash = hash ^ (hash >> 16);
-  srl(at, reg0, 16);
-  xor_(reg0, reg0, at);
-}
-
-
 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                               Register elements,
                                               Register key,
@@ -412,10 +427,36 @@
   // at   - Temporary (avoid MacroAssembler instructions also using 'at').
   Label done;
 
-  GetNumberHash(reg0, reg1);
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  nor(reg1, reg0, zero_reg);
+  sll(at, reg0, 15);
+  addu(reg0, reg1, at);
+
+  // hash = hash ^ (hash >> 12);
+  srl(at, reg0, 12);
+  xor_(reg0, reg0, at);
+
+  // hash = hash + (hash << 2);
+  sll(at, reg0, 2);
+  addu(reg0, reg0, at);
+
+  // hash = hash ^ (hash >> 4);
+  srl(at, reg0, 4);
+  xor_(reg0, reg0, at);
+
+  // hash = hash * 2057;
+  li(reg1, Operand(2057));
+  mul(reg0, reg0, reg1);
+
+  // hash = hash ^ (hash >> 16);
+  srl(at, reg0, 16);
+  xor_(reg0, reg0, at);
 
   // Compute the capacity mask.
-  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
   sra(reg1, reg1, kSmiTagSize);
   Subu(reg1, reg1, Operand(1));
 
@@ -426,12 +467,12 @@
     mov(reg2, reg0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+      Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
     }
     and_(reg2, reg2, reg1);
 
     // Scale the index by multiplying by the element size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    ASSERT(NumberDictionary::kEntrySize == 3);
     sll(at, reg2, 1);  // 2x.
     addu(reg2, reg2, at);  // reg2 = reg2 * 3.
 
@@ -439,7 +480,7 @@
     sll(at, reg2, kPointerSizeLog2);
     addu(reg2, elements, at);
 
-    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
+    lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
     if (i != kProbes - 1) {
       Branch(&done, eq, key, Operand(at));
     } else {
@@ -451,14 +492,14 @@
   // Check that the value is a normal property.
   // reg2: elements + (index * kPointerSize).
   const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
   And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
   Branch(miss, ne, at, Operand(zero_reg));
 
   // Get the value at the masked, scaled index and return.
   const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+      NumberDictionary::kElementsStartOffset + kPointerSize;
   lw(result, FieldMemOperand(reg2, kValueOffset));
 }
 
@@ -719,7 +760,7 @@
   int16_t stack_offset = num_to_push * kPointerSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kPointerSize;
       sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -758,7 +799,7 @@
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, stack_offset));
       stack_offset += kPointerSize;
@@ -774,7 +815,7 @@
   int16_t stack_offset = num_to_push * kDoubleSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kDoubleSize;
       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
@@ -816,7 +857,7 @@
   CpuFeatures::Scope scope(FPU);
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
       stack_offset += kDoubleSize;
@@ -826,6 +867,21 @@
 }
 
 
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+  RegList saved_regs = kJSCallerSaved | ra.bit();
+  MultiPush(saved_regs);
+  AllowExternalCallThatCantCauseGC scope(this);
+
+  // Save to a0 in case address == t0.
+  Move(a0, address);
+  PrepareCallCFunction(2, t0);
+
+  li(a1, instructions * kInstrSize);
+  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+  MultiPop(saved_regs);
+}
+
+
 void MacroAssembler::Ext(Register rt,
                          Register rs,
                          uint16_t pos,
@@ -854,34 +910,21 @@
                          uint16_t pos,
                          uint16_t size) {
   ASSERT(pos < 32);
-  ASSERT(pos + size < 32);
+  ASSERT(pos + size <= 32);
+  ASSERT(size != 0);
 
   if (mips32r2) {
     ins_(rt, rs, pos, size);
   } else {
     ASSERT(!rt.is(t8) && !rs.is(t8));
-
-    srl(t8, rt, pos + size);
-    // The left chunk from rt that needs to
-    // be saved is on the right side of t8.
-    sll(at, t8, pos + size);
-    // The 'at' register now contains the left chunk on
-    // the left (proper position) and zeroes.
-    sll(t8, rt, 32 - pos);
-    // t8 now contains the right chunk on the left and zeroes.
-    srl(t8, t8, 32 - pos);
-    // t8 now contains the right chunk on
-    // the right (proper position) and zeroes.
-    or_(rt, at, t8);
-    // rt now contains the left and right chunks from the original rt
-    // in their proper position and zeroes in the middle.
-    sll(t8, rs, 32 - size);
-    // t8 now contains the chunk from rs on the left and zeroes.
-    srl(t8, t8, 32 - size - pos);
-    // t8 now contains the original chunk from rs in
-    // the middle (proper position).
-    or_(rt, rt, t8);
-    // rt now contains the result of the ins instruction in R2 mode.
+    Subu(at, zero_reg, Operand(1));  // at <- 0xffffffff.
+    srl(at, at, 32 - size);          // at <- mask of 'size' low bits.
+    and_(t8, rs, at);                // t8 <- low 'size' bits of rs.
+    sll(t8, t8, pos);                // Shift the field into position.
+    sll(at, at, pos);                // Shift the mask into position.
+    nor(at, at, zero_reg);           // Invert the mask.
+    and_(at, rt, at);                // Clear the field in rt.
+    or_(rt, t8, at);                 // Insert the field from rs into rt.
   }
 }
 
@@ -952,11 +995,9 @@
   mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
   // Test if scratch > fd.
-  c(OLT, D, fd, scratch);
-
-  Label simple_convert;
   // If fd < 2^31 we can convert it normally.
-  bc1t(&simple_convert);
+  Label simple_convert;
+  BranchF(&simple_convert, NULL, lt, fd, scratch);
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
@@ -976,6 +1017,102 @@
 }
 
 
+void MacroAssembler::BranchF(Label* target,
+                             Label* nan,
+                             Condition cc,
+                             FPURegister cmp1,
+                             FPURegister cmp2,
+                             BranchDelaySlot bd) {
+  if (cc == al) {
+    Branch(bd, target);
+    return;
+  }
+
+  ASSERT(nan || target);
+  // Check for unordered (NaN) cases.
+  if (nan) {
+    c(UN, D, cmp1, cmp2);
+    bc1t(nan);
+  }
+
+  if (target) {
+    // Here NaN cases were either handled by this function or are assumed to
+    // have been handled by the caller.
+    // Unsigned conditions are treated as their signed counterparts.
+    switch (cc) {
+      case Uless:
+      case less:
+        c(OLT, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case Ugreater:
+      case greater:
+        c(ULE, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Ugreater_equal:
+      case greater_equal:
+        c(ULT, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Uless_equal:
+      case less_equal:
+        c(OLE, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case eq:
+        c(EQ, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case ne:
+        c(EQ, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      default:
+        CHECK(0);
+    }
+  }
+
+  if (bd == PROTECT) {
+    nop();
+  }
+}
+
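+// Illustrative use of BranchF (cf. the truncation helper above): branch to
+// 'simple_convert' when fd < scratch, with no separate NaN target:
+//   BranchF(&simple_convert, NULL, lt, fd, scratch);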
+
+void MacroAssembler::Move(FPURegister dst, double imm) {
+  ASSERT(CpuFeatures::IsEnabled(FPU));
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation zero(0.0);
+  DoubleRepresentation value(imm);
+  // Handle special values first.
+  bool force_load = dst.is(kDoubleRegZero);
+  if (value.bits == zero.bits && !force_load) {
+    mov_d(dst, kDoubleRegZero);
+  } else if (value.bits == minus_zero.bits && !force_load) {
+    neg_d(dst, kDoubleRegZero);
+  } else {
+    uint32_t lo, hi;
+    DoubleAsTwoUInt32(imm, &lo, &hi);
+    // Move the low part of the double into the lower register of the
+    // corresponding FPU register pair.
+    if (lo != 0) {
+      li(at, Operand(lo));
+      mtc1(at, dst);
+    } else {
+      mtc1(zero_reg, dst);
+    }
+    // Move the high part of the double into the higher register of the
+    // corresponding FPU register pair.
+    if (hi != 0) {
+      li(at, Operand(hi));
+      mtc1(at, dst.high());
+    } else {
+      mtc1(zero_reg, dst.high());
+    }
+  }
+}
+
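+// Note, purely illustrative: with the special cases above, Move(f4, 0.0)
+// emits a single mov_d from kDoubleRegZero and Move(f4, -0.0) a single
+// neg_d, rather than two mtc1 loads each (f4 is an arbitrary example
+// register).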
+
 // Tries to get a signed int32 out of a double precision floating point heap
 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
 // 32bits signed integer range.
@@ -1074,6 +1211,53 @@
 }
 
 
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+                                     FPURegister result,
+                                     DoubleRegister double_input,
+                                     Register scratch1,
+                                     Register except_flag,
+                                     CheckForInexactConversion check_inexact) {
+  ASSERT(CpuFeatures::IsSupported(FPU));
+  CpuFeatures::Scope scope(FPU);
+
+  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
+
+  if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+    except_mask &= ~kFCSRInexactFlagMask;
+  }
+
+  // Save FCSR.
+  cfc1(scratch1, FCSR);
+  // Disable FPU exceptions.
+  ctc1(zero_reg, FCSR);
+
+  // Do operation based on rounding mode.
+  switch (rounding_mode) {
+    case kRoundToNearest:
+      round_w_d(result, double_input);
+      break;
+    case kRoundToZero:
+      trunc_w_d(result, double_input);
+      break;
+    case kRoundToPlusInf:
+      ceil_w_d(result, double_input);
+      break;
+    case kRoundToMinusInf:
+      floor_w_d(result, double_input);
+      break;
+  }  // End of switch-statement.
+
+  // Retrieve FCSR.
+  cfc1(except_flag, FCSR);
+  // Restore FCSR.
+  ctc1(scratch1, FCSR);
+
+  // Check for FPU exceptions.
+  And(except_flag, except_flag, Operand(except_mask));
+}
+
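+// Illustrative (hypothetical caller): truncate toward zero and branch away
+// if any checked FPU exception flag was raised:
+//   EmitFPUTruncate(kRoundToZero, single_scratch, double_input, scratch1,
+//                   except_flag, kDontCheckForInexactConversion);
+//   Branch(&not_int32, ne, except_flag, Operand(zero_reg));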
+
 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                  Register input_high,
                                                  Register input_low,
@@ -1160,22 +1344,21 @@
                                       FPURegister double_input,
                                       FPURegister single_scratch,
                                       Register scratch,
-                                      Register input_high,
-                                      Register input_low) {
+                                      Register scratch2,
+                                      Register scratch3) {
   CpuFeatures::Scope scope(FPU);
-  ASSERT(!input_high.is(result));
-  ASSERT(!input_low.is(result));
-  ASSERT(!input_low.is(input_high));
+  ASSERT(!scratch2.is(result));
+  ASSERT(!scratch3.is(result));
+  ASSERT(!scratch3.is(scratch2));
   ASSERT(!scratch.is(result) &&
-         !scratch.is(input_high) &&
-         !scratch.is(input_low));
+         !scratch.is(scratch2) &&
+         !scratch.is(scratch3));
   ASSERT(!single_scratch.is(double_input));
 
   Label done;
   Label manual;
 
   // Clear cumulative exception flags and save the FCSR.
-  Register scratch2 = input_high;
   cfc1(scratch2, FCSR);
   ctc1(zero_reg, FCSR);
   // Try a conversion to a signed integer.
@@ -1192,6 +1375,8 @@
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Load the double value and perform a manual truncation.
+  Register input_high = scratch2;
+  Register input_low = scratch3;
   Move(input_low, input_high, double_input);
   EmitOutOfInt32RangeTruncate(result,
                               input_high,
@@ -1223,15 +1408,6 @@
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
 
-bool MacroAssembler::UseAbsoluteCodePointers() {
-  if (is_trampoline_emitted()) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-
 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
   BranchShort(offset, bdslot);
 }
@@ -1245,11 +1421,18 @@
 
 
 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, bdslot);
+    } else {
+      Jr(L, bdslot);
+    }
   } else {
-    BranchShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jr(L, bdslot);
+    } else {
+      BranchShort(L, bdslot);
+    }
   }
 }
 
@@ -1257,15 +1440,26 @@
 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                             const Operand& rt,
                             BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
@@ -1288,8 +1482,8 @@
   Register scratch = at;
 
   if (rt.is_reg()) {
-    // We don't want any other register but scratch clobbered.
-    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+    // NOTE: 'at' can be clobbered by Branch, but it is legal to use it as
+    // rs or rt.
     r2 = rt.rm_;
     switch (cond) {
       case cc_always:
@@ -1791,11 +1985,18 @@
 
 
 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jalr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, bdslot);
+    } else {
+      Jalr(L, bdslot);
+    }
   } else {
-    BranchAndLinkShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jalr(L, bdslot);
+    } else {
+      BranchAndLinkShort(L, bdslot);
+    }
   }
 }
 
@@ -1803,15 +2004,26 @@
 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                    const Operand& rt,
                                    BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jalr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
@@ -2318,10 +2530,10 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   mov(a0, zero_reg);
   li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -2332,60 +2544,50 @@
 // Exception handling.
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+                                    HandlerType type,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
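+  // The asserted layout, from low to high address, is therefore:
+  //   next handler, code object, state, context, fp.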
 
-  // The return address is passed in register ra.
+  // For the JSEntry handler, we must preserve a0-a3 and s0.
+  // t1-t3 are available. We will build up the handler from the bottom by
+  // pushing on the stack. First compute the state.
+  unsigned state = StackHandler::OffsetField::encode(handler_index);
   if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      li(t0, Operand(StackHandler::TRY_CATCH));
-    } else {
-      li(t0, Operand(StackHandler::TRY_FINALLY));
-    }
-    // Save the current handler as the next handler.
-    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    lw(t1, MemOperand(t2));
-
-    addiu(sp, sp, -StackHandlerConstants::kSize);
-    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
-    sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
-    sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
-    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
-    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
-    // Link this handler as the new current one.
-    sw(sp, MemOperand(t2));
-
+    state |= (type == TRY_CATCH_HANDLER)
+        ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+        : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
   } else {
-    // Must preserve a0-a3, and s0 (argv).
     ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for fp. We expect the code throwing an exception to check fp
-    // before dereferencing it to restore the context.
-    li(t0, Operand(StackHandler::ENTRY));
-
-    // Save the current handler as the next handler.
-    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-    lw(t1, MemOperand(t2));
-
-    ASSERT(Smi::FromInt(0) == 0);  // Used for no context.
-
-    addiu(sp, sp, -StackHandlerConstants::kSize);
-    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
-    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
-    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
-    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
-    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
-    // Link this handler as the new current one.
-    sw(sp, MemOperand(t2));
+    state |= StackHandler::KindField::encode(StackHandler::ENTRY);
   }
+
+  // Set up the code object (t1) and the state (t2) for pushing.
+  li(t1, Operand(CodeObject()));
+  li(t2, Operand(state));
+
+  // Push the frame pointer, context, state, and code object.
+  if (try_location == IN_JAVASCRIPT) {
+    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
+  } else {
+    ASSERT_EQ(Smi::FromInt(0), 0);
+    // The second zero_reg indicates no context.
+    // The first zero_reg is the NULL frame pointer.
+    // The operands are reversed to match the order of MultiPush/Pop.
+    Push(zero_reg, zero_reg, t2, t1);
+  }
+
+  // Link the current handler as the next handler.
+  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  lw(t1, MemOperand(t2));
+  push(t1);
+  // Set this new handler as the current one.
+  sw(sp, MemOperand(t2));
 }
 
 
@@ -2398,19 +2600,36 @@
 }
 
 
-void MacroAssembler::Throw(Register value) {
-  // v0 is expected to hold the exception.
-  Move(v0, value);
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // v0 = exception, a1 = code object, a2 = state.
+  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
+  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
+  sll(a2, a2, kPointerSizeLog2);
+  Addu(a2, a3, a2);
+  lw(a2, MemOperand(a2));  // Smi-tagged offset.
+  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
+  sra(t9, a2, kSmiTagSize);
+  Addu(t9, t9, a1);
+  Jump(t9);  // Jump.
+}
 
+
+void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // Drop the sp to the top of the handler.
+  // The exception is expected in v0.
+  Move(v0, value);
+
+  // Drop the stack pointer to the top of the top handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
                                    isolate())));
   lw(sp, MemOperand(a3));
@@ -2419,44 +2638,19 @@
   pop(a2);
   sw(a2, MemOperand(a3));
 
-  // Restore context and frame pointer, discard state (a3).
-  MultiPop(a3.bit() | cp.bit() | fp.bit());
+  // Get the code object (a1) and state (a2).  Restore the context and frame
+  // pointer.
+  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
-  // of them.
+  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+  // or cp.
   Label done;
-  Branch(&done, eq, fp, Operand(zero_reg));
+  Branch(&done, eq, cp, Operand(zero_reg));
   sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   bind(&done);
 
-#ifdef DEBUG
-  // When emitting debug_code, set ra as return address for the jump.
-  // 5 instructions: add: 1, pop: 2, jump: 2.
-  const int kOffsetRaInstructions = 5;
-  Label find_ra;
-
-  if (emit_debug_code()) {
-    // Compute ra for the Jump(t9).
-    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
-
-    // This branch-and-link sequence is needed to get the current PC on mips,
-    // saved to the ra register. Then adjusted for instruction count.
-    bal(&find_ra);  // bal exposes branch-delay.
-    nop();  // Branch delay slot nop.
-    bind(&find_ra);
-    addiu(ra, ra, kOffsetRaBytes);
-  }
-#endif
-
-  pop(t9);  // 2 instructions: lw, add sp.
-  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
-
-  if (emit_debug_code()) {
-    // Make sure that the expected number of instructions were generated.
-    ASSERT_EQ(kOffsetRaInstructions,
-              InstructionsGeneratedSince(&find_ra));
-  }
+  JumpToHandlerEntry();
 }
 
 
@@ -2465,39 +2659,16 @@
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // v0 is expected to hold the exception.
-  Move(v0, value);
-
-  // Drop sp to the top stack handler.
-  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-  lw(sp, MemOperand(a3));
-
-  // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  lw(a2, MemOperand(sp, kStateOffset));
-  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  lw(sp, MemOperand(sp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
-
-  // Set the top handler address to next handler past the current ENTRY handler.
-  pop(a2);
-  sw(a2, MemOperand(a3));
-
+  // The exception is expected in v0.
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
-    ExternalReference external_caught(
-           Isolate::kExternalCaughtExceptionAddress, isolate());
+    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                      isolate());
     li(a0, Operand(false, RelocInfo::NONE));
     li(a2, Operand(external_caught));
     sw(a0, MemOperand(a2));
@@ -2506,45 +2677,37 @@
     Failure* out_of_memory = Failure::OutOfMemoryException();
     li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
     li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                        isolate())));
+                                     isolate())));
     sw(v0, MemOperand(a2));
+  } else if (!value.is(v0)) {
+    mov(v0, value);
   }
 
-  // Stack layout at this point. See also StackHandlerConstants.
-  // sp ->   state (ENTRY)
-  //         cp
-  //         fp
-  //         ra
+  // Drop the stack pointer to the top of the top stack handler.
+  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  lw(sp, MemOperand(a3));
 
-  // Restore context and frame pointer, discard state (r2).
-  MultiPop(a2.bit() | cp.bit() | fp.bit());
+  // Unwind the handlers until the ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind);
+  bind(&fetch_next);
+  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
 
-#ifdef DEBUG
-  // When emitting debug_code, set ra as return address for the jump.
-  // 5 instructions: add: 1, pop: 2, jump: 2.
-  const int kOffsetRaInstructions = 5;
-  Label find_ra;
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::ENTRY == 0);
+  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+  And(a2, a2, Operand(StackHandler::KindField::kMask));
+  Branch(&fetch_next, ne, a2, Operand(zero_reg));
 
-  if (emit_debug_code()) {
-    // Compute ra for the Jump(t9).
-    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(a2);
+  sw(a2, MemOperand(a3));
 
-    // This branch-and-link sequence is needed to get the current PC on mips,
-    // saved to the ra register. Then adjusted for instruction count.
-    bal(&find_ra);  // bal exposes branch-delay slot.
-    nop();  // Branch delay slot nop.
-    bind(&find_ra);
-    addiu(ra, ra, kOffsetRaBytes);
-  }
-#endif
-  pop(t9);  // 2 instructions: lw, add sp.
-  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+  // Get the code object (a1) and state (a2).  Clear the context and frame
+  // pointer (0 was saved in the handler).
+  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
 
-  if (emit_debug_code()) {
-    // Make sure that the expected number of instructions were generated.
-    ASSERT_EQ(kOffsetRaInstructions,
-              InstructionsGeneratedSince(&find_ra));
-  }
+  JumpToHandlerEntry();
 }
 
 
@@ -2647,6 +2810,7 @@
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
+  ASSERT(!object_size.is(t9));
   ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
 
   // Check relative positions of allocation top and limit addresses.
@@ -2984,15 +3148,140 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
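+  // Store 'filler' into every word of [start_offset, end_offset).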
+  Label loop, entry;
+  Branch(&entry);
+  bind(&loop);
+  sw(filler, MemOperand(start_offset));
+  Addu(start_offset, start_offset, kPointerSize);
+  bind(&entry);
+  Branch(&loop, lt, start_offset, Operand(end_offset));
+}
+
+
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, ls, scratch,
+         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Register scratch,
+                                              Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+                                                 Register key_reg,
+                                                 Register receiver_reg,
+                                                 Register elements_reg,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register scratch3,
+                                                 Register scratch4,
+                                                 Label* fail) {
+  Label smi_value, maybe_nan, have_double_value, is_nan, done;
+  Register mantissa_reg = scratch2;
+  Register exponent_reg = scratch3;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  CheckMap(value_reg,
+           scratch1,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have a value greater (signed) than
+  // 0x7ff00000 in the exponent.
+  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+
+  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  bind(&have_double_value);
+  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  Addu(scratch1, scratch1, elements_reg);
+  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  sw(exponent_reg, FieldMemOperand(scratch1, offset));
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
+  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+  bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  jmp(&have_double_value);
+
+  bind(&smi_value);
+  Addu(scratch1, elements_reg,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  Addu(scratch1, scratch1, scratch2);
+  // scratch1 now holds the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(FPU)) {
+    destination = FloatingPointHelper::kFPURegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+
+  Register untagged_value = receiver_reg;
+  SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(this,
+                                          untagged_value,
+                                          destination,
+                                          f0,
+                                          mantissa_reg,
+                                          exponent_reg,
+                                          scratch4,
+                                          f2);
+  if (destination == FloatingPointHelper::kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    sdc1(f0, MemOperand(scratch1, 0));
+  } else {
+    sw(mantissa_reg, MemOperand(scratch1, 0));
+    sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+  }
+  bind(&done);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
@@ -3183,13 +3472,18 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                  call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
     SetCallKind(t1, call_kind);
     Call(code);
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     SetCallKind(t1, call_kind);
@@ -3207,6 +3501,9 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -3229,6 +3526,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -3247,24 +3547,23 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  li(a1, Operand(Handle<JSFunction>(function)));
+  li(a1, Operand(function));
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
-  } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
 }
 
 
@@ -3305,7 +3604,8 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -3313,6 +3613,16 @@
   GetObjectType(function, result, scratch);
   Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
+  if (miss_on_bound_function) {
+    lw(scratch,
+       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    lw(scratch,
+       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    And(scratch, scratch,
+        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+    Branch(miss, ne, scratch, Operand(zero_reg));
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -3361,51 +3671,24 @@
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
-                                         Register r1, const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
-      kNoASTId, cond, r1, r2);
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
-                                             Condition cond,
-                                             Register r1,
-                                             const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
-  Object* result;
-  { MaybeObject* maybe_result = stub->TryGetCode();
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
-  return result;
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ExternalReference function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
+                                              int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -3476,11 +3759,10 @@
   Ret();
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result = TryTailCallExternalReference(
-      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+      0,
+      1);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -3493,8 +3775,12 @@
       1);
   mov(v0, s0);
   jmp(&leave_exit_frame);
+}
 
-  return result;
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
 }
 
 
@@ -3578,7 +3864,16 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
+
+  if (left.is(right) && dst.is(left)) {
+    ASSERT(!dst.is(t9));
+    ASSERT(!scratch.is(t9));
+    ASSERT(!left.is(t9));
+    ASSERT(!right.is(t9));
+    ASSERT(!overflow_dst.is(t9));
+    mov(t9, right);
+    right = t9;
+  }
 
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
@@ -3611,10 +3906,17 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
   ASSERT(!scratch.is(left));
   ASSERT(!scratch.is(right));
 
+  // This happens with some Crankshaft code. Since Subu works fine if
+  // left == right, let's not make that restriction here.
+  if (left.is(right)) {
+    mov(dst, zero_reg);
+    mov(overflow_dst, zero_reg);
+    return;
+  }
+
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
     subu(dst, left, right);  // Left is overwritten.
@@ -3663,8 +3965,7 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   li(a0, Operand(function->nargs));
   li(a1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1);
-  stub.SaveDoubles();
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
 
@@ -3696,17 +3997,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  li(a0, num_arguments);
-  return TryJumpToExternalReference(ext);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -3723,17 +4013,12 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& builtin) {
-  li(a1, Operand(builtin));
-  CEntryStub stub(1);
-  return TryTailCallStub(&stub);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   GetBuiltinEntry(t9, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(t9));
@@ -3866,14 +4151,20 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
   push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
   push(a0);
-  CallRuntime(Runtime::kAbort, 2);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -4126,8 +4417,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(1, kSmiTagMask);
   or_(at, reg1, reg2);
-  andi(at, at, kSmiTagMask);
-  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+  JumpIfNotSmi(at, on_not_both_smi);
 }
 
 
@@ -4138,8 +4428,7 @@
   ASSERT_EQ(1, kSmiTagMask);
   // Both Smi tags must be 1 (not Smi).
   and_(at, reg1, reg2);
-  andi(at, at, kSmiTagMask);
-  Branch(on_either_smi, eq, at, Operand(zero_reg));
+  JumpIfSmi(at, on_either_smi);
 }
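
These rewrites fold the explicit andi/Branch pairs into JumpIfSmi/JumpIfNotSmi, which rely on the usual smi trick: with kSmiTag == 0 and kSmiTagMask == 1, OR-ing two tagged words leaves the low bit set iff at least one operand is not a smi. A host-side illustration of the invariant (a sketch; the values are examples):

#include <cstdint>
#include <cstdio>

// A smi has a clear low bit, so (reg1 | reg2) has its low bit set iff at
// least one of the two values is a heap pointer rather than a smi.
static bool BothSmi(uint32_t reg1, uint32_t reg2) {
  return ((reg1 | reg2) & 1) == 0;
}

int main() {
  printf("%d\n", BothSmi(0x8, 0x4));  // 1: both low bits clear, both smis.
  printf("%d\n", BothSmi(0x8, 0x5));  // 0: the second value is not a smi.
  return 0;
}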
 
 
@@ -4217,8 +4506,7 @@
   // Check that neither is a smi.
   STATIC_ASSERT(kSmiTag == 0);
   And(scratch1, first, Operand(second));
-  And(scratch1, scratch1, Operand(kSmiTagMask));
-  Branch(failure, eq, scratch1, Operand(zero_reg));
+  JumpIfSmi(scratch1, failure);
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                              second,
                                              scratch1,
@@ -4257,7 +4545,23 @@
 
 static const int kRegisterPassedArguments = 4;
 
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  num_reg_arguments += 2 * num_double_arguments;
+
+  // Up to four simple arguments are passed in registers a0..a3.
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  stack_passed_words += kCArgSlotCount;
+  return stack_passed_words;
+}
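
CalculateStackPassedWords centralizes the o32 stack accounting that the old code duplicated in PrepareCallCFunction and CallCFunctionHelper. The same arithmetic, runnable stand-alone (kRegisterPassedArguments == 4 as defined above; kCArgSlotCount == 4 is assumed, matching the four reserved MIPS argument slots):

#include <cstdio>

const int kRegisterPassedArguments = 4;
const int kCArgSlotCount = 4;  // Assumed size of the reserved o32 arg slots.

int CalculateStackPassedWords(int num_reg_arguments,
                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;  // A double occupies 2 words.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;  // Slots are reserved even if unused.
  return stack_passed_words;
}

int main() {
  // 3 integer args + 2 doubles -> 7 register-sized args, 3 of them on the
  // stack, plus the 4 mandatory slots: 7 words.
  printf("%d\n", CalculateStackPassedWords(3, 2));
  return 0;
}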
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
   // Up to four simple arguments are passed in registers a0..a3.
@@ -4265,9 +4569,8 @@
   // mips, even though those argument slots are not normally used.
   // Remaining arguments are pushed on the stack, above (higher address than)
   // the argument slots.
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                 0 : num_arguments - kRegisterPassedArguments) +
-                                kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
@@ -4282,26 +4585,43 @@
 }
 
 
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
-                                   int num_arguments) {
-  CallCFunctionHelper(no_reg, function, t8, num_arguments);
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  li(t8, Operand(function));
+  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
-                                         ExternalReference function_reference,
-                                         Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -4329,19 +4649,15 @@
   // allow preemption, so the return address in the link register
   // stays correct.
 
-  if (function.is(no_reg)) {
-    function = t9;
-    li(function, Operand(function_reference));
-  } else if (!function.is(t9)) {
+  if (!function.is(t9)) {
     mov(t9, function);
     function = t9;
   }
 
   Call(function);
 
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                0 : num_arguments - kRegisterPassedArguments) +
-                               kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -4354,6 +4670,263 @@
 #undef BRANCH_ARGS_CHECK
 
 
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+                                         Register scratch,
+                                         Register new_value) {
+  lw(scratch, MemOperand(li_location));
+  // At this point scratch is a lui(at, ...) instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be a lui.",
+        scratch, Operand(LUI));
+    lw(scratch, MemOperand(li_location));
+  }
+  srl(t9, new_value, kImm16Bits);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location));
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be an ori.",
+        scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  Ins(scratch, new_value, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize));
+
+  // Update the I-cache so the new lui and ori can be executed.
+  FlushICache(li_location, 2);
+}
+
+void MacroAssembler::GetRelocatedValue(Register li_location,
+                                       Register value,
+                                       Register scratch) {
+  lw(value, MemOperand(li_location));
+  if (emit_debug_code()) {
+    And(value, value, kOpcodeMask);
+    Check(eq, "The instruction should be a lui.",
+        value, Operand(LUI));
+    lw(value, MemOperand(li_location));
+  }
+
+  // value now holds a lui instruction. Extract the immediate.
+  sll(value, value, kImm16Bits);
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction should be an ori.",
+        scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  // "scratch" now holds an ori instruction. Extract the immediate.
+  andi(scratch, scratch, kImm16Mask);
+
+  // Merge the results.
+  or_(value, value, scratch);
+}
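
Both helpers above treat an li()-generated constant load as a lui/ori pair whose two 16-bit immediates jointly encode one 32-bit value: PatchRelocatedValue rewrites the two immediate fields in place, and GetRelocatedValue reassembles them. The split and merge, host-side (the constant is just an example):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t value = 0x12345678;
  uint16_t lui_imm = static_cast<uint16_t>(value >> 16);  // Upper half, lui.
  uint16_t ori_imm = static_cast<uint16_t>(value);        // Lower half, ori.
  // GetRelocatedValue performs exactly this merge: sll by 16, then or.
  uint32_t rebuilt = (static_cast<uint32_t>(lui_imm) << 16) | ori_imm;
  printf("0x%08x\n", rebuilt);  // 0x12345678
  return 0;
}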
+
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met) {
+  And(scratch, object, Operand(~Page::kPageAlignmentMask));
+  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  And(scratch, scratch, Operand(mask));
+  Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  And(t8, t9, Operand(mask_scratch));
+  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
+  // Shift left 1 by adding.
+  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
+  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
+  And(t8, t9, Operand(mask_scratch));
+  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
+  jmp(&other_color);
+
+  bind(&word_boundary);
+  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+  And(t9, t9, Operand(1));
+  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
+  bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+                                      Register scratch,
+                                      Label* not_data_object) {
+  ASSERT(!AreAliased(value, scratch, t8, no_reg));
+  Label is_data_object;
+  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  Branch(&is_data_object, eq, t8, Operand(scratch));
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  Branch(not_data_object, ne, t8, Operand(zero_reg));
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+  sll(t8, t8, kPointerSizeLog2);
+  Addu(bitmap_reg, bitmap_reg, t8);
+  li(t8, Operand(1));
+  sllv(mask_reg, t8, mask_reg);
+}
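
GetMarkBits packs three derivations into a handful of instructions: the page base, the bit index within a bitmap cell, and the cell index within the page. The same arithmetic in plain C++, assuming the 32-bit constants of this port (kPointerSizeLog2 == 2, kBitsPerCellLog2 == 5, kPageSizeBits == 20; treat the exact values as illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSizeLog2 = 2;   // 4-byte pointers.
  const int kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell.
  const int kPageSizeBits = 20;     // Assumed 1MB pages.
  const uint32_t kPageAlignmentMask = (1u << kPageSizeBits) - 1;

  uint32_t addr = 0x12345678;
  uint32_t page = addr & ~kPageAlignmentMask;               // And(bitmap_reg..)
  uint32_t bit = (addr >> kPointerSizeLog2) &
                 ((1u << kBitsPerCellLog2) - 1);            // Ext(mask_reg, ..)
  const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
  uint32_t cell = (addr & kPageAlignmentMask) >> kLowBits;  // Ext(t8, ..)
  uint32_t cell_addr = page + (cell << kPointerSizeLog2);   // Addu(bitmap_reg)
  uint32_t mask = 1u << bit;                                // sllv(mask_reg..)
  // The actual loads then add MemoryChunk::kHeaderSize to cell_addr.
  printf("cell at 0x%08x, mask 0x%08x\n", cell_addr, mask);
  return 0;
}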
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Register load_scratch,
+    Label* value_is_white_and_not_data) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  And(t8, mask_scratch, load_scratch);
+  Branch(&done, ne, t8, Operand(zero_reg));
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // sll may overflow, making the check conservative.
+    sll(t8, mask_scratch, 1);
+    And(t8, load_scratch, t8);
+    Branch(&ok, eq, t8, Operand(zero_reg));
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;  // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object;
+
+  // Check for heap-number
+  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  {
+    Label skip;
+    Branch(&skip, ne, t8, Operand(map));
+    li(length, HeapNumber::kSize);
+    Branch(&is_data_object);
+    bind(&skip);
+  }
+
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = load_scratch;
+  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  And(t8, instance_type, Operand(kExternalStringTag));
+  {
+    Label skip;
+    Branch(&skip, eq, t8, Operand(zero_reg));
+    li(length, ExternalString::kSize);
+    Branch(&is_data_object);
+    bind(&skip);
+  }
+
+  // Sequential string, either ASCII or UC16.
+  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+  // getting the length multiplied by 2.
+  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  lw(t9, FieldMemOperand(value, String::kLengthOffset));
+  And(t8, instance_type, Operand(kStringEncodingMask));
+  {
+    Label skip;
+    Branch(&skip, eq, t8, Operand(zero_reg));
+    srl(t9, t9, 1);
+    bind(&skip);
+  }
+  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  And(length, length, Operand(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  Or(t8, t8, Operand(mask_scratch));
+  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  Addu(t8, t8, Operand(length));
+  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
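
The tail of EnsureNotWhite sizes the white data object so its bytes can be credited to the page's live-byte counter. For sequential strings the smi-tagged length (value * 2) doubles as the UC16 byte count, and one shift recovers the ASCII count; a worked example of the rounding (the header size and alignment mask are assumptions matching 32-bit V8 of this era):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kHeaderSize = 12;          // Assumed SeqString::kHeaderSize.
  const uint32_t kObjectAlignmentMask = 3;  // 4-byte object alignment.

  uint32_t chars = 5;
  uint32_t smi_length = chars << 1;  // As loaded from String::kLengthOffset.
  uint32_t ascii_bytes = smi_length >> 1;  // Shift the smi tag away.
  uint32_t uc16_bytes = smi_length;        // Tag kept: length * 2 bytes.
  uint32_t ascii_size = (kHeaderSize + ascii_bytes + kObjectAlignmentMask) &
                        ~kObjectAlignmentMask;
  uint32_t uc16_size = (kHeaderSize + uc16_bytes + kObjectAlignmentMask) &
                       ~kObjectAlignmentMask;
  printf("ascii: %u, uc16: %u\n", ascii_size, uc16_size);  // 20, 24.
  return 0;
}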
+
+
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
@@ -4365,6 +4938,60 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  ASSERT(!output_reg.is(input_reg));
+  Label done;
+  li(output_reg, Operand(255));
+  // Normal branch: nop in delay slot.
+  Branch(&done, gt, input_reg, Operand(output_reg));
+  // Use delay slot in this branch.
+  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+  mov(output_reg, zero_reg);  // In delay slot.
+  mov(output_reg, input_reg);  // Value is in range 0..255.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Move(temp_double_reg, 0.0);
+  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is less than zero, NaN or Inf, return 0.
+  mov(result_reg, zero_reg);
+  Branch(&done);
+
+  // Double value is >= 255, return 255.
+  bind(&above_zero);
+  Move(temp_double_reg, 255.0);
+  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+  li(result_reg, Operand(255));
+  Branch(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  round_w_d(temp_double_reg, input_reg);
+  mfc1(result_reg, temp_double_reg);
+  bind(&done);
+}
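
ClampDoubleToUint8 leans on FPU comparison semantics: the initial 'gt' test is false for NaN, so NaN falls through to the zero case, and round_w_d rounds to nearest under the default FCSR mode. A scalar model of the intended mapping (a sketch, not the generated code):

#include <cmath>
#include <cstdio>
#include <limits>

static int ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // Negative, zero, or NaN.
  if (value > 255.0) return 255;   // Above the representable range.
  return static_cast<int>(std::nearbyint(value));  // Round to nearest(-even).
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  printf("%d %d %d %d\n",
         ClampDoubleToUint8(-3.5),    // 0
         ClampDoubleToUint8(nan),     // 0
         ClampDoubleToUint8(127.5),   // 128 (ties round to even)
         ClampDoubleToUint8(300.0));  // 255
  return 0;
}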
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index c968ffc..454fe9e 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -50,15 +50,16 @@
 // trying to update gp register for position-independent-code. Whenever
 // MIPS generated code calls C code, it must be via t9 register.
 
-// Registers aliases
+
+// Register aliases.
 // cp is assumed to be a callee saved register.
+const Register lithiumScratchReg = s3;  // Scratch register.
+const Register lithiumScratchReg2 = s4;  // Scratch register.
+const Register condReg = s5;  // Simulated (partial) condition code for mips.
 const Register roots = s6;  // Roots array pointer.
 const Register cp = s7;     // JavaScript context pointer.
 const Register fp = s8_fp;  // Alias for fp.
-// Registers used for condition evaluation.
-const Register condReg1 = s4;
-const Register condReg2 = s5;
-
+const DoubleRegister lithiumScratchDouble = f30;  // Double scratch register.
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -90,6 +91,43 @@
   PROTECT
 };
 
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand()  {
+  return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
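
CFunctionArgumentOperand, now a header-visible inline function instead of a file-local static, places argument 5 and upward directly above the four reserved o32 argument slots. The offsets it yields (assuming kPointerSize == 4 and kCArgsSlotsSize == 16):

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kCArgsSlotsSize = 16;  // Four reserved slots of one word each.
  for (int index = 5; index <= 7; index++) {
    int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
    printf("arg %d -> MemOperand(sp, %d)\n", index, offset);  // 16, 20, 24.
  }
  return 0;
}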
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -138,21 +176,22 @@
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
-  int CallSize(Register target, COND_ARGS);
+  static int CallSize(Register target, COND_ARGS);
   void Call(Register target, COND_ARGS);
-  int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  int CallSize(Handle<Code> code,
-               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-               unsigned ast_id = kNoASTId,
-               COND_ARGS);
+  static int CallSize(Handle<Code> code,
+                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+                      unsigned ast_id = kNoASTId,
+                      COND_ARGS);
   void Call(Handle<Code> code,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             unsigned ast_id = kNoASTId,
             COND_ARGS);
   void Ret(COND_ARGS);
-  inline void Ret(BranchDelaySlot bd) {
-    Ret(al, zero_reg, Operand(zero_reg), bd);
+  inline void Ret(BranchDelaySlot bd, Condition cond = al,
+    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+    Ret(cond, rs, rt, bd);
   }
 
 #undef COND_ARGS
@@ -197,6 +236,8 @@
     mtc1(src_high, FPURegister::from_code(dst.code() + 1));
   }
 
+  void Move(FPURegister dst, double imm);
+
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it used by v8, for example in
   // CodeGenerator::ProcessDeferred().
@@ -206,6 +247,7 @@
     Branch(L);
   }
 
+
   // Load an object from the root table.
   void LoadRoot(Register destination,
                 Heap::RootListIndex index);
@@ -221,39 +263,127 @@
                  Condition cond, Register src1, const Operand& src2);
 
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,  // eq for new space, ne otherwise.
-                  Label* branch);
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  void IncrementalMarkingRecordWriteHelper(Register object,
+                                           Register value,
+                                           Register address);
+
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
 
-  // For the page containing |object| mark the region covering [address]
-  // dirty. The object address must be in the first 8K of an allocated page.
-  void RecordWriteHelper(Register object,
-                         Register address,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
 
-  // For the page containing |object| mark the region covering
-  // [object+offset] dirty. The object address must be in the first 8K
-  // of an allocated page.  The 'scratch' registers are used in the
-  // implementation and all 3 registers are clobbered by the
-  // operation, as well as the 'at' register. RecordWrite updates the
-  // write barrier even when storing smis.
-  void RecordWrite(Register object,
-                   Operand offset,
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
                    Register scratch0,
-                   Register scratch1);
+                   Register scratch1,
+                   Label* on_black);
 
-  // For the page containing |object| mark the region covering
-  // [address] dirty. The object address must be in the first 8K of an
-  // allocated page.  All 3 registers are clobbered by the operation,
-  // as well as the ip register. RecordWrite updates the write barrier
-  // even when storing smis.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register scratch);
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e., it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     ra_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 
   // ---------------------------------------------------------------------------
@@ -266,7 +396,6 @@
                               Register scratch,
                               Label* miss);
 
-  void GetNumberHash(Register reg0, Register scratch);
 
   void LoadFromNumberDictionary(Label* miss,
                                 Register elements,
@@ -518,6 +647,14 @@
     Addu(sp, sp, 2 * kPointerSize);
   }
 
+  // Pop three registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3) {
+    lw(src3, MemOperand(sp, 0 * kPointerSize));
+    lw(src2, MemOperand(sp, 1 * kPointerSize));
+    lw(src1, MemOperand(sp, 2 * kPointerSize));
+    Addu(sp, sp, 3 * kPointerSize);
+  }
+
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
@@ -536,10 +673,17 @@
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+  // Does not handle errors.
+  void FlushICache(Register address, unsigned instructions);
+
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
+  // ---------------------------------------------------------------------------
+  // FPU macros. These do not handle special cases like NaN or +- inf.
+
   // Convert unsigned word to double.
   void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -548,6 +692,24 @@
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
+  // Wrapper function for the different cmp/branch types.
+  void BranchF(Label* target,
+               Label* nan,
+               Condition cc,
+               FPURegister cmp1,
+               FPURegister cmp2,
+               BranchDelaySlot bd = PROTECT);
+
+  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+  inline void BranchF(BranchDelaySlot bd,
+                      Label* target,
+                      Label* nan,
+                      Condition cc,
+                      FPURegister cmp1,
+                      FPURegister cmp2) {
+    BranchF(target, nan, cc, cmp1, cmp2, bd);
+  };
+
   // Convert the HeapNumber pointed to by source to a 32bits signed integer
   // dest. If the HeapNumber does not fit into a 32bits signed integer branch
   // to not_int32 label. If FPU is available double_scratch is used but not
@@ -559,6 +721,18 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
+  // Truncates a double using a specific rounding mode.
+  // The except_flag will contain any exceptions caused by the instruction.
+  // If check_inexact is kDontCheckForInexactConversion, then the inexact
+  // exception is masked.
+  void EmitFPUTruncate(FPURoundingMode rounding_mode,
+                       FPURegister result,
+                       DoubleRegister double_input,
+                       Register scratch1,
+                       Register except_flag,
+                       CheckForInexactConversion check_inexact
+                           = kDontCheckForInexactConversion);
+
   // Helper for EmitECMATruncate.
   // This will truncate a floating-point value outside of the signed 32bit
   // integer range to a 32bit signed integer.
@@ -580,15 +754,6 @@
                         Register scratch2,
                         Register scratch3);
 
-  // -------------------------------------------------------------------------
-  // Activation frames.
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter exit frame.
   // argc - argument count to be dropped by LeaveExitFrame.
   // save_doubles - saves FPU registers on stack, currently disabled.
@@ -615,6 +780,7 @@
                                     Register map,
                                     Register scratch);
 
+
   // -------------------------------------------------------------------------
   // JavaScript invokes.
 
@@ -646,7 +812,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       CallKind call_kind);
@@ -672,19 +838,14 @@
   void DebugBreak();
 #endif
 
-  void InitializeRootRegister() {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    li(kRootRegister, Operand(roots_address));
-  }
 
   // -------------------------------------------------------------------------
   // Exception handling.
 
   // Push a new try handler and link into try handler chain.
-  // The return address must be passed in register ra.
-  // Clobber t0, t1, t2.
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  void PushTryHandler(CodeLocation try_location,
+                      HandlerType type,
+                      int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
@@ -708,6 +869,13 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // -------------------------------------------------------------------------
   // Support functions.
 
@@ -719,7 +887,8 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   void GetObjectType(Register function,
                      Register map,
@@ -731,6 +900,31 @@
                          Register scratch,
                          Label* fail);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Register scratch,
+                               Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Register scratch,
+                                Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements, otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register value_reg,
+                                   Register key_reg,
+                                   Register receiver_reg,
+                                   Register elements_reg,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   Register scratch4,
+                                   Label* fail);
+
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
@@ -760,6 +954,21 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type,
+                               Register result) {
+    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    And(type, type, Operand(kIsNotStringMask));
+    ASSERT_EQ(0, kStringTag);
+    return eq;
+  }
+
+
   // Picks out an array index from the hash field.
   // Register use:
   //   hash - holds the index's hash. Clobbered.
@@ -833,27 +1042,9 @@
   void CallStub(CodeStub* stub, Condition cond = cc_always,
                 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
-                                           Condition cond = cc_always,
-                                           Register r1 = zero_reg,
-                                           const Operand& r2 =
-                                               Operand(zero_reg));
-
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
-                                               Condition cond = cc_always,
-                                               Register r1 = zero_reg,
-                                               const Operand& r2 =
-                                                   Operand(zero_reg));
-
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -874,17 +1065,14 @@
                                  int num_arguments,
                                  int result_size);
 
-  // Tail call of a runtime routine (jump). Try to generate the code if
-  // necessary. Do not perform a GC but instead return a retry after GC
-  // failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
   // Before calling a C-function from generated code, align arguments on stack
   // and add space for the four mips argument slots.
   // After aligning the frame, non-register arguments must be stored on the
@@ -894,7 +1082,11 @@
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_arguments, Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            int num_double_registers,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            Register scratch);
 
   // Arguments 1-4 are placed in registers a0 thru a3 respectively.
   // Arguments 5..n are stored to stack using following:
@@ -906,7 +1098,13 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+  void CallCFunction(ExternalReference function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
   // There are two ways of passing double arguments on MIPS, depending on
@@ -917,16 +1115,15 @@
   void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
   void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
 
-  // Calls an API function. Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions. Restores context.
-  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
-                                           int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.  stack_space
+  // - space to be unwound on exit (includes the call js arguments space and
+  // the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
   // Jump to the builtin routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
-
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
@@ -982,6 +1179,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // Number utilities.
@@ -1009,6 +1209,13 @@
     Addu(reg, reg, reg);
   }
 
+  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+  void SmiTagCheckOverflow(Register reg, Register overflow) {
+    mov(overflow, reg);  // Save original value.
+    addu(reg, reg, reg);
+    xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
+  }
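
SmiTagCheckOverflow uses the identity stated in its comment: tagging doubles the value, and doubling a 32-bit integer overflows exactly when the sign flips, i.e. when value ^ (2 * value) is negative. A host-side check of the boundary cases (smis span -2^30 .. 2^30 - 1 on 32-bit V8):

#include <cstdint>
#include <cstdio>

static bool SmiTagOverflows(int32_t value) {
  // Unsigned doubling avoids signed-overflow UB; the xor sign test matches
  // the generated addu/xor_ sequence.
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) * 2);
  return (value ^ tagged) < 0;
}

int main() {
  printf("%d\n", SmiTagOverflows(0x3FFFFFFF));   // 0: largest smi fits.
  printf("%d\n", SmiTagOverflows(0x40000000));   // 1: overflows.
  printf("%d\n", SmiTagOverflows(-0x40000000));  // 0: smallest smi fits.
  printf("%d\n", SmiTagOverflows(-0x40000001));  // 1: overflows.
  return 0;
}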
+
   void SmiTag(Register dst, Register src) {
     Addu(dst, src, src);
   }
@@ -1023,10 +1230,11 @@
 
   // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label,
-                        Register scratch = at) {
+                        Register scratch = at,
+                        BranchDelaySlot bd = PROTECT) {
     ASSERT_EQ(0, kSmiTag);
     andi(scratch, value, kSmiTagMask);
-    Branch(smi_label, eq, scratch, Operand(zero_reg));
+    Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
   }
 
   // Jump if the register contains a non-smi.
@@ -1096,13 +1304,33 @@
                                            Register scratch2,
                                            Label* failure);
 
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  void ClampDoubleToUint8(Register result_reg,
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Patch the relocated value (lui/ori pair).
+  void PatchRelocatedValue(Register li_location,
+                           Register scratch,
+                           Register new_value);
+  // Get the relocated value (loaded data) from the lui/ori pair.
+  void GetRelocatedValue(Register li_location,
+                         Register value,
+                         Register scratch);
+
  private:
   void CallCFunctionHelper(Register function,
-                           ExternalReference function_reference,
-                           Register scratch,
-                           int num_arguments);
+                           int num_reg_arguments,
+                           int num_double_arguments);
 
   void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
   void BranchShort(int16_t offset, Condition cond, Register rs,
@@ -1138,25 +1366,37 @@
   // the function in the 'resolved' flag.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
+
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
   MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
-  bool UseAbsoluteCodePointers();
-
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1197,34 +1437,6 @@
 };
 
 
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
-  return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand()  {
-  return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-static inline MemOperand CFunctionArgumentOperand(int index) {
-  ASSERT(index > kCArgSlotCount);
-  // Argument 5 takes the slot just past the four Arg-slots.
-  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
-  return MemOperand(sp, offset);
-}
-
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 63e836f..cb210fe 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -377,9 +377,12 @@
     // Isolate.
     __ li(a3, Operand(ExternalReference::isolate_address()));
 
-    ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(function, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
 
     // Restore regexp engine registers.
     __ MultiPop(regexp_registers_to_retain);
@@ -607,6 +610,12 @@
 
     // Entry code:
     __ bind(&entry_label_);
+
+    // Tell the system that we have a stack frame.  Because the type is MANUAL,
+    // no code is generated.
+    FrameScope scope(masm_, StackFrame::MANUAL);
+
+    // Actually emit code to start a new stack frame.
     // Push arguments
     // Save callee-save registers.
     // Start new stack frame.
@@ -1103,6 +1112,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
@@ -1244,13 +1258,14 @@
   if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
   // Stack is already aligned for call, so decrement by alignment
   // to make room for storing the return address.
-  __ Subu(sp, sp, Operand(stack_alignment));
-  __ sw(ra, MemOperand(sp, 0));
-  __ mov(a0, sp);
+  __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+  const int return_address_offset = kCArgsSlotsSize;
+  __ Addu(a0, sp, return_address_offset);
+  __ sw(ra, MemOperand(a0, 0));
   __ mov(t9, t1);
   __ Call(t9);
-  __ lw(ra, MemOperand(sp, 0));
-  __ Addu(sp, sp, Operand(stack_alignment));
+  __ lw(ra, MemOperand(sp, return_address_offset));
+  __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
   __ Jump(ra);
 }
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 17c1897..f70775d 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -72,7 +72,7 @@
 // code.
 class MipsDebugger {
  public:
-  explicit MipsDebugger(Simulator* sim);
+  explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
   ~MipsDebugger();
 
   void Stop(Instruction* instr);
@@ -105,10 +105,6 @@
   void RedoBreakpoints();
 };
 
-MipsDebugger::MipsDebugger(Simulator* sim) {
-  sim_ = sim;
-}
-
 
 MipsDebugger::~MipsDebugger() {
 }
@@ -391,6 +387,13 @@
     if (line == NULL) {
       break;
     } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_.
+        sim_->set_last_debugger_input(line);
+      }
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
       int argc = SScanF(line,
@@ -757,7 +760,6 @@
         PrintF("Unknown command: %s\n", cmd);
       }
     }
-    DeleteArray(line);
   }
 
   // Add all the breakpoints back to stop execution and enter the debugger
@@ -791,6 +793,12 @@
 }
 
 
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
+
+
 void Simulator::FlushICache(v8::internal::HashMap* i_cache,
                             void* start_addr,
                             size_t size) {
@@ -911,6 +919,8 @@
   for (int i = 0; i < kNumExceptions; i++) {
     exceptions[i] = 0;
   }
+
+  last_debugger_input_ = NULL;
 }
 
 
@@ -1359,9 +1369,9 @@
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 512 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 256;
+  return reinterpret_cast<uintptr_t>(stack_) + 512;
 }
 
 
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 69dddfa..ba625f4 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -221,6 +221,10 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
   // ICache checking.
   static void FlushICache(v8::internal::HashMap* i_cache, void* start,
                           size_t size);
@@ -358,6 +362,9 @@
   int icount_;
   int break_count_;
 
+  // Debugger input.
+  char* last_debugger_input_;
+
   // Icache simulation.
   v8::internal::HashMap* i_cache_;
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 5b94973..9f214a3 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -99,13 +99,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register scratch0,
-    Register scratch1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register scratch0,
+                                             Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -120,9 +119,8 @@
   Register map = scratch1;
   __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ Branch(miss_label, ne, at, Operand(zero_reg));
-
+  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
 
   // Check that receiver is a JSObject.
   __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -140,20 +138,16 @@
   // Restore the temporarily used register.
   __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
-  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      receiver,
-      properties,
-      name,
-      scratch1);
-  if (result->IsFailure()) return result;
 
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     receiver,
+                                                     properties,
+                                                     name,
+                                                     scratch1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  return result;
 }
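
From this point on, stub-cache-mips.cc is handlified: helpers that
returned MUST_USE_RESULT MaybeObject* so callers could propagate
allocation failures (the IsFailure()/set_failure() checks removed below)
now take and return Handle<>s, which stay valid across a GC. The same
change retires the Try* macro-assembler entry points further down, e.g.
TryCallApiFunctionAndReturn becomes CallApiFunctionAndReturn. A toy model
of the indirection that makes handles safe; V8's real Handle is more
involved:

#include <cassert>
#include <cstddef>

// Toy handle: one extra indirection through a slot the GC rewrites when
// it moves the object, so code can hold references across allocation.
// Not V8's actual implementation.
template <typename T>
class HandleSketch {
 public:
  HandleSketch() : slot_(NULL) {}
  explicit HandleSketch(T** slot) : slot_(slot) {}

  bool is_null() const { return slot_ == NULL; }
  bool is_identical_to(const HandleSketch& other) const {
    // Same object, possibly reached via different slots.
    if (is_null() || other.is_null()) return is_null() && other.is_null();
    return *slot_ == *other.slot_;
  }
  T* operator*() const {
    assert(!is_null());
    return *slot_;  // always reads the object's current address
  }

 private:
  T** slot_;  // GC-visible location holding the object's address
};

This is why the rewritten code tests !transition.is_null() instead of
transition != NULL and writes *interceptor_holder where it needs the raw
pointer.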
 
 
@@ -240,7 +234,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -248,8 +245,8 @@
   __ li(at, isolate->global());
   __ Branch(miss, ne, prototype, Operand(at));
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ li(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -261,8 +258,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -283,8 +282,7 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ And(scratch, receiver, Operand(kSmiTagMask));
-  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+  __ JumpIfSmi(receiver, miss_label);
 
   // Check that the object is a JS array.
   __ GetObjectType(receiver, scratch, scratch);
@@ -370,9 +368,9 @@
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
@@ -397,11 +395,11 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
-    __ li(a2, Operand(Handle<Map>(transition)));
+    __ li(a2, Operand(transition));
     __ Push(a2, a0);
     __ TailCallExternalReference(
            ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -410,10 +408,10 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ li(t0, Operand(Handle<Map>(transition)));
+    __ li(t0, Operand(transition));
     __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
   }
 
@@ -432,7 +430,13 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+    __ mov(name_reg, a0);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kRAHasNotBeenSaved,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -445,7 +449,13 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+    __ mov(name_reg, a0);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kRAHasNotBeenSaved,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register v0).
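
GenerateStoreField now ends in RecordWriteField rather than the old
RecordWrite, passing kRAHasNotBeenSaved and kDontSaveFPRegs so the
barrier knows what state it may clobber; the added __ mov(name_reg, a0)
appears to copy the stored value into a clobberable register first, since
a0 must survive as the value to return. Conceptually, the emitted barrier
records the written slot for the collector; a rough host-side model, all
names hypothetical:

#include <cstddef>
#include <set>

// Rough model of a remembered-set write barrier. Real V8 emits this
// inline in generated MIPS code (RecordWriteField) and also feeds the
// incremental marker; the sketch shows only the old-to-new case.
struct ObjSketch { bool in_new_space; };

static std::set<ObjSketch**> remembered_slots;

void WriteFieldSketch(ObjSketch* host, ObjSketch** slot, ObjSketch* value) {
  *slot = value;
  // Only a pointer from an old-space object to a new-space object needs
  // to be remembered; smis and old-to-old stores can be skipped.
  if (!host->in_new_space && value != NULL && value->in_new_space) {
    remembered_slots.insert(slot);
  }
}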
@@ -457,20 +467,15 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 static void GenerateCallFunction(MacroAssembler* masm,
-                                 Object* object,
+                                 Handle<Object> object,
                                  const ParameterCount& arguments,
                                  Label* miss,
                                  Code::ExtraICState extra_ic_state) {
@@ -502,23 +507,24 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
   Register scratch = name;
-  __ li(scratch, Operand(Handle<Object>(interceptor)));
+  __ li(scratch, Operand(interceptor));
   __ Push(scratch, receiver, holder);
   __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -554,7 +560,7 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       int argc) {
   // ----------- S t a t e -------------
@@ -567,18 +573,18 @@
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ li(t1, Operand(Handle<JSFunction>(function)));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ li(t1, Operand(function));
   __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ li(a0, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ li(a0, api_call_info);
     __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
   } else {
-    __ li(t2, Operand(Handle<Object>(call_data)));
+    __ li(t2, call_data);
   }
 
   // Store js function and call data.
@@ -589,12 +595,9 @@
   // (refer to layout above).
   __ Addu(a2, sp, Operand(2 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
   const int kApiStackSpace = 4;
 
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -617,16 +620,15 @@
   // v8::Arguments::is_construct_call = 0
   __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated). Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_API_CALL,
                         masm->isolate());
-  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 class CallInterceptorCompiler BASE_EMBEDDED {
@@ -640,86 +642,63 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
-
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
-
     Counters* counters = masm->isolate()->counters();
-
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-        !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                      interceptor_holder);
+          !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                        lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                      scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -732,9 +711,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-      stub_compiler_->CheckPrototypes(object, receiver,
-                                      interceptor_holder, scratch1,
-                                      scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -747,10 +726,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -761,10 +741,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiDirectCall(masm,
-                                                      optimization,
-                                                      arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -785,66 +762,57 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
-
-    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
     // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
               IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
               masm->isolate()),
           5);
-
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    __ Push(holder, name_);
-
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
-
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
-
+      __ Push(holder, name_);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+    }
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
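
The EnterInternalFrame()/LeaveInternalFrame() pairs are replaced by
FrameScope, which ties frame teardown to C++ scope exit; that is why
LoadWithInterceptor above gains an extra brace block and CompileRegular
ends with a "// Leave the internal frame." comment instead of an explicit
call. A minimal RAII sketch with a hypothetical assembler:

// Hypothetical assembler hooks, for illustration only.
struct MasmSketch {
  void EnterInternalFrame() { /* emit frame setup */ }
  void LeaveInternalFrame() { /* emit frame teardown */ }
};

// In the spirit of FrameScope(masm, StackFrame::INTERNAL): entering in
// the constructor and leaving in the destructor means no exit path can
// forget to close the frame.
class FrameScopeSketch {
 public:
  explicit FrameScopeSketch(MasmSketch* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScopeSketch() { masm_->LeaveInternalFrame(); }

 private:
  MasmSketch* masm_;
};

void LoadWithInterceptorShape(MasmSketch* masm) {
  {
    FrameScopeSketch scope(masm);
    // ... push registers, call the interceptor, pop registers ...
  }  // frame torn down here, before code that runs outside it
}

For a return emitted while the scope is still open, the diff instead
calls frame_scope.GenerateLeaveFrame() explicitly before __ Ret() (see
the GenerateLoadInterceptor hunk below).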
@@ -861,52 +829,41 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
-  __ li(scratch, Operand(Handle<Object>(cell)));
+  __ li(scratch, Operand(cell));
   __ lw(scratch,
         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   __ Branch(miss, ne, scratch, Operand(at));
-  return cell;
 }
 
 
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
-    MacroAssembler* masm,
-    JSObject* object,
-    JSObject* holder,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  JSObject* current = object;
-  while (current != holder) {
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
     if (current->IsGlobalObject()) {
-      // Returns a cell or a failure.
-      MaybeObject* result = GenerateCheckPropertyCell(
-          masm,
-          GlobalObject::cast(current),
-          name,
-          scratch,
-          miss);
-      if (result->IsFailure()) return result;
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
     }
-    ASSERT(current->IsJSObject());
-    current = JSObject::cast(current->GetPrototype());
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
   }
-  return NULL;
 }
 
 
@@ -1030,13 +987,13 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -1054,81 +1011,51 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    ASSERT(current->GetPrototype()->IsJSObject());
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
-        Object* lookup_result = NULL;  // Initialization to please compiler.
-        if (!maybe_lookup_result->ToObject(&lookup_result)) {
-          set_failure(Failure::cast(maybe_lookup_result));
-          return reg;
-        }
-        name = String::cast(lookup_result);
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now the object is in holder_reg.
-      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-
-      // Branch on the result of the map check.
-      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-        // Restore scratch register to be the map of the object.  In the
-        // new space case below, we load the prototype from the map in
-        // the scratch register.
-        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // From now the object is in holder_reg.
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Check the map of the current object.
+      Handle<Map> current_map(current->map());
       __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       // Branch on the result of the map check.
-      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
+      __ Branch(miss, ne, scratch1, Operand(current_map));
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // From now the object is in holder_reg.
-      __ li(reg, Operand(Handle<JSObject>(prototype)));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (heap()->InNewSpace(*prototype)) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ li(reg, Operand(prototype));
+      }
     }
 
     if (save_at_depth == depth) {
@@ -1139,65 +1066,57 @@
     current = prototype;
   }
 
+  // Log the check depth.
+  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+
   // Check the holder map.
   __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
   __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
 
-  // Log the check depth.
-  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  };
+  }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-
-  MaybeObject* result = GenerateCheckPropertyCells(masm(),
-                                                   object,
-                                                   holder,
-                                                   name,
-                                                   scratch1,
-                                                   miss);
-  if (result->IsFailure()) set_failure(Failure::cast(result));
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ And(scratch1, receiver, Operand(kSmiTagMask));
-  __ Branch(miss, eq, scratch1, Operand(zero_reg));
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                      name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
   GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<Object> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
@@ -1208,83 +1127,77 @@
                       scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ li(v0, Operand(Handle<Object>(value)));
+  __ li(v0, Operand(value));
   __ Ret();
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
 
   // Check that the maps haven't changed.
-  Register reg =
-    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
-                    name, miss);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  Handle<AccessorInfo> callback_handle(callback);
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ li(scratch3, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ li(scratch3, callback);
     __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ li(scratch3, Handle<Object>(callback_handle->data()));
+    __ li(scratch3, Handle<Object>(callback->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
   __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
   // struct from the function (which is currently the case). This means we pass
-  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
-  // will handle setting up a0.
+  // the arguments in a1-a2 instead of a0-a1. CallApiFunctionAndReturn
+  // will handle setting up a0.
 
   const int kApiStackSpace = 1;
-
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
+
   // Create AccessorInfo instance on the stack above the exit frame with
   // scratch2 (internal::Object **args_) as the data.
   __ sw(a2, MemOperand(sp, kPointerSize));
   // a2 (second argument - see note above) = AccessorInfo&
   __ Addu(a2, sp, kPointerSize);
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
+  const int kStackUnwindSpace = 4;
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  // 4 args - will be freed later by LeaveExitFrame.
-  return masm()->TryCallApiFunctionAndReturn(ref, 4);
+  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1300,9 +1213,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+        lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1317,47 +1230,44 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ Push(receiver, holder_reg, name_reg);
+      } else {
+        __ Push(holder_reg, name_reg);
+      }
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see a caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ Push(receiver, holder_reg, name_reg);
-    } else {
-      __ Push(holder_reg, name_reg);
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+      // Leave the internal frame.
     }
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method).
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
-    __ LeaveInternalFrame();
-    __ Ret();
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1369,21 +1279,21 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), v0, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ Ret();
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ li(scratch2, Handle<AccessorInfo>(callback));
+      __ li(scratch2, callback);
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1419,16 +1329,16 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+    __ Branch(miss, ne, a2, Operand(name));
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1441,7 +1351,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(a0, miss);
   }
 
@@ -1450,15 +1360,16 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(a3, Operand(cell));
   __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1473,27 +1384,24 @@
     __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ Branch(miss, ne, t0, Operand(a3));
   } else {
-    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+    __ Branch(miss, ne, a1, Operand(function));
   }
 }
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1513,23 +1421,23 @@
   Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
   GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1539,7 +1447,7 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
 
@@ -1555,8 +1463,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object), receiver,
-                  holder, a3, v0, t0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
+                  name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1565,10 +1473,8 @@
     __ Ret();
   } else {
     Label call_builtin;
-
     Register elements = a3;
     Register end_elements = t1;
-
     // Get the elements array of the object.
     __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
@@ -1580,7 +1486,7 @@
                 DONT_DO_SMI_CHECK);
 
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements;
 
       // Get the array's length into v0 and calculate new length.
       __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1594,29 +1500,51 @@
       // Check if we could survive without allocation.
       __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
 
+      // Check if value is a smi.
+      Label with_write_barrier;
+      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ JumpIfNotSmi(t0, &with_write_barrier);
+
       // Save new length.
       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
       // Push the element.
-      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
       __ Addu(end_elements, elements, end_elements);
       const int kEndElementsOffset =
           FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
-      __ Addu(end_elements, end_elements, kPointerSize);
+      __ Addu(end_elements, end_elements, kEndElementsOffset);
+      __ sw(t0, MemOperand(end_elements));
 
-      // Check for a smi.
-      __ JumpIfNotSmi(t0, &with_write_barrier);
-      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-      __ InNewSpace(elements, t0, eq, &exit);
-      __ RecordWriteHelper(elements, end_elements, t0);
+
+      __ lw(t2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(t2, t2, &call_builtin);
+
+      // Save new length.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Push the element.
+      // We may need a register containing the address end_elements below,
+      // so write back the value in end_elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      __ Addu(end_elements, end_elements, kEndElementsOffset);
+      __ sw(t0, MemOperand(end_elements));
+
+      __ RecordWrite(elements,
+                     end_elements,
+                     t0,
+                     kRAHasNotBeenSaved,
+                     kDontSaveFPRegs,
+                     EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
       __ Drop(argc + 1);
       __ Ret();
 
@@ -1628,6 +1556,15 @@
         __ Branch(&call_builtin);
       }
 
+      __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
+      // Growing elements that are SMI-only requires special handling in case
+      // the new element is non-Smi. For now, delegate to the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(a2, &no_fast_elements_check);
+      __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(t3, t3, &call_builtin);
+      __ bind(&no_fast_elements_check);
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(
               masm()->isolate());
@@ -1653,8 +1590,7 @@
       // Update new_space_allocation_top.
       __ sw(t2, MemOperand(t3));
       // Push the argument.
-      __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ sw(t2, MemOperand(end_elements));
+      __ sw(a2, MemOperand(end_elements));
       // Fill the rest with holes.
       __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
@@ -1679,19 +1615,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1701,25 +1637,22 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   Register receiver = a1;
   Register elements = a3;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(JSObject::cast(object),
-                  receiver, holder, elements, t0, v0, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
+                  t0, v0, name, &miss);
 
   // Get the elements array of the object.
   __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1768,20 +1701,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1791,10 +1723,9 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
@@ -1802,7 +1733,7 @@
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
@@ -1814,13 +1745,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
-                  a1, a3, t0, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  v0, holder, a1, a3, t0, name, &miss);
 
   Register receiver = a1;
   Register index = t1;
-  Register scratch = a3;
   Register result = v0;
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1829,20 +1759,19 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1853,22 +1782,21 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, Handle<String>(name));
+  __ li(a2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1878,21 +1806,18 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
-
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1900,14 +1825,13 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
-                  a1, a3, t0, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  v0, holder, a1, a3, t0, name, &miss);
 
   Register receiver = v0;
   Register index = t1;
-  Register scratch1 = a1;
-  Register scratch2 = a3;
+  Register scratch = a3;
   Register result = v0;
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
@@ -1916,21 +1840,20 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1941,22 +1864,21 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, Handle<String>(name));
+  __ li(a2, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1969,22 +1891,23 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
 
-    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2000,13 +1923,13 @@
   // Convert the smi code to uint16.
   __ And(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, v0);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, v0);
+  generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
@@ -2015,19 +1938,19 @@
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
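A detail worth noting in the function above: the argument is masked with Smi::FromInt(0xffff) rather than untagged first. On this 32-bit target a smi stores its payload in the upper 31 bits with a zero tag bit, so AND-ing two smis yields the smi of the AND-ed payloads. A worked sketch of that arithmetic (assuming the kSmiTag == 0, kSmiTagSize == 1 scheme used here):

    #include <cassert>
    #include <cstdint>

    // 32-bit smi scheme: value v is represented as (v << 1); tag bit 0 is 0.
    inline int32_t SmiFromInt(int32_t v) { return v << 1; }
    inline int32_t SmiToInt(int32_t smi) { return smi >> 1; }

    int main() {
      int32_t code = SmiFromInt(0x12345);
      // Masking with a smi mask keeps the result a valid smi:
      int32_t masked = code & SmiFromInt(0xffff);
      assert(SmiToInt(masked) == (0x12345 & 0xffff));
      return 0;
    }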
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2036,30 +1959,29 @@
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  if (!CpuFeatures::IsSupported(FPU))
-    return heap()->undefined_value();
+  if (!CpuFeatures::IsSupported(FPU)) {
+    return Handle<Code>::null();
+  }
+
   CpuFeatures::Scope scope_fpu(FPU);
-
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2149,19 +2071,19 @@
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* obj = GenerateMissBranch();
-  if (obj->IsFailure()) return obj;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2171,25 +2093,23 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
-
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-
-    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2251,33 +2171,32 @@
 
   __ bind(&miss);
   // a2: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
 
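Both math stubs share the cell-based guard seen above: when the function was found as a global (cell is non-null), the stub is valid only while that global still holds the exact function, so GenerateLoadFunctionFromCell re-reads the cell at run time and branches to miss on mismatch. In spirit (PropertyCell and the names below are simplified stand-ins, not V8's types; the real check is emitted as MIPS code):

    #include <cstddef>

    struct JSFunction;
    struct PropertyCell { JSFunction* value; };  // one-word box for a global

    // The stub only proceeds while the global still holds the expected
    // function; any reassignment of the global invalidates the fast path.
    JSFunction* LoadGuardedGlobal(PropertyCell* cell, JSFunction* expected,
                                  bool* miss) {
      if (cell->value != expected) {
        *miss = true;   // global was reassigned: take the miss path
        return NULL;
      }
      return expected;
    }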
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
 
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
 
@@ -2296,40 +2215,37 @@
   ReserveSpaceForFastApiCall(masm(), a0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
                   depth, &miss);
 
-  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiDirectCall(masm(), optimization, argc);
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // Undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
@@ -2342,23 +2258,20 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ And(t1, a1, Operand(kSmiTagMask));
-    __ Branch(&miss, eq, t1, Operand(zero_reg));
+    __ JumpIfSmi(a1, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
           1, a0, a3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
-                      &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
+                      name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2369,50 +2282,46 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ GetObjectType(a1, a3, a3);
         __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
-        __ And(t1, a1, Operand(kSmiTagMask));
-        __ Branch(&fast, eq, t1, Operand(zero_reg));
+        __ JumpIfSmi(a1, &fast);
         __ GetObjectType(a1, a0, a0);
         __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(t0, Heap::kTrueValueRootIndex);
@@ -2423,17 +2332,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
-                        a1, t0, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            a0, holder, a3, a1, t0, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
     }
 
-    default:
-      UNREACHABLE();
-  }
-
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
@@ -2441,17 +2351,16 @@
   // Handle call cache miss.
   __ bind(&miss);
 
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
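extra_state_ (renamed from extra_ic_state_ throughout this file) packs several flags into the code object's extra IC state; CallICBase::Contextual::decode and StringStubState::decode above are bit-field accessors over that word. A simplified version of the encode/decode pattern (a stand-in for V8's BitField template; the field layout below is assumed for the sketch):

    #include <cassert>

    template <class T, int kShift, int kSize>
    struct BitField {  // simplified stand-in for V8's BitField
      static const unsigned kMask = ((1u << kSize) - 1) << kShift;
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << kShift;
      }
      static T decode(unsigned state) {
        return static_cast<T>((state & kMask) >> kShift);
      }
    };

    typedef BitField<bool, 0, 1> Contextual;  // assumed position, sketch only

    int main() {
      unsigned extra_state = Contextual::encode(true);
      assert(Contextual::decode(extra_state));  // selects CALL_AS_FUNCTION
      return 0;
    }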
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -2463,71 +2372,54 @@
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ lw(a1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         a1,
-                                         a3,
-                                         t0,
-                                         a0,
-                                         &miss);
-  if (result->IsFailure()) {
-    return result;
-  }
+  CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
+                   &miss);
 
   // Move returned value, the function to call, to a1.
   __ mov(a1, v0);
   // Restore receiver.
   __ lw(a0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // Undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
@@ -2544,34 +2436,31 @@
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
-  ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
-  } else {
-    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
-                  JUMP_FUNCTION, call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
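The new comment in CompileCallGlobal captures the key change: instead of baking the target's Code object into the stub (which asserted function->is_compiled() and froze the call target), the stub now loads JSFunction::kCodeEntryOffset and jumps through it, so recompilation of the target takes effect without touching call sites. Conceptually (a hand-rolled sketch, not the actual object layout):

    typedef int (*CodeEntry)(int arg);

    struct JSFunctionLike {  // stands in for the heap object
      CodeEntry code_entry;  // rewritten in place when the function recompiles
    };

    int CallThroughEntry(JSFunctionLike* f, int arg) {
      // Each call re-reads the entry, so swapping in newly optimized code
      // retargets every caller without patching any call site.
      return f->code_entry(arg);
    }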
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2581,25 +2470,21 @@
   Label miss;
 
   // Name register might be clobbered.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     a1, a2, a3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
   __ bind(&miss);
   __ li(a2, Operand(Handle<String>(name)));  // Restore name.
   Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2625,7 +2510,7 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   __ push(a1);  // Receiver.
-  __ li(a3, Operand(Handle<AccessorInfo>(callback)));  // Callback info.
+  __ li(a3, Operand(callback));  // Callback info.
   __ Push(a3, a2, a0);
 
   // Do tail-call to the runtime system.
@@ -2644,8 +2529,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2691,9 +2577,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2710,7 +2597,7 @@
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(t0, Operand(cell));
   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ Branch(&miss, eq, t1, Operand(t2));
@@ -2718,6 +2605,8 @@
   // Store the value in the cell.
   __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ mov(v0, a0);  // Stored value must be returned in v0.
+  // Cells are always rescanned, so no write barrier here.
+
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
   __ Ret();
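
The new comment above ("Cells are always rescanned, so no write barrier here") is worth unpacking: with incremental marking, an ordinary pointer store into a heap object must execute a write barrier so the marker never loses a black-to-white edge, and global property cells may skip it only because the collector revisits them unconditionally. A schematic barrier under the usual tri-color invariant (all names invented for the sketch):

    #include <cstddef>

    enum Color { WHITE, GREY, BLACK };  // tri-color marking states

    struct HeapObjectLike {
      Color color;
      HeapObjectLike* field;
    };

    // Schematic Dijkstra-style write barrier: never let a black object
    // point at a white one without re-shading the target.
    void WriteWithBarrier(HeapObjectLike* host, HeapObjectLike* value) {
      host->field = value;
      if (host->color == BLACK && value != NULL && value->color == WHITE) {
        value->color = GREY;  // the marker will visit it again
      }
    }
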
@@ -2733,9 +2622,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- ra    : return address
@@ -2751,15 +2640,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  a1,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
   }
 
   // Return undefined if the maps of the full prototype chain are still the
   // same.
@@ -2770,14 +2652,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2796,24 +2678,19 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
-                                             callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2822,10 +2699,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<Object> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2842,9 +2719,9 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2853,17 +2730,9 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object,
-                          holder,
-                          &lookup,
-                          a0,
-                          a2,
-                          a3,
-                          a1,
-                          t0,
-                          name,
+  GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2873,11 +2742,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2888,16 +2758,15 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
-    __ And(t0, a0, Operand(kSmiTagMask));
-    __ Branch(&miss, eq, t0, Operand(zero_reg));
+  if (!object.is_identical_to(holder)) {
+    __ JumpIfSmi(a0, &miss);
   }
 
   // Check that the map of the global has not changed.
   CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
 
   // Get the value from the cell.
-  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ li(a3, Operand(cell));
   __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2920,9 +2789,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
@@ -2932,7 +2801,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
   __ bind(&miss);
@@ -2942,11 +2811,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2955,15 +2824,10 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
-                                             t0, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
+                       &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2971,10 +2835,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<Object> value) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2983,7 +2848,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
   __ bind(&miss);
@@ -2994,9 +2859,10 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3005,19 +2871,11 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          a1,
-                          a0,
-                          a2,
-                          a3,
-                          t0,
-                          name,
+  GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3026,7 +2884,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3035,7 +2894,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadArrayLength(masm(), a1, a2, &miss);
   __ bind(&miss);
@@ -3045,7 +2904,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3057,7 +2917,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
   __ bind(&miss);
@@ -3069,7 +2929,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3081,7 +2942,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
 
   // Check the name hasn't changed.
-  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a0, Operand(name));
 
   GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
   __ bind(&miss);
@@ -3092,33 +2953,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
   //  -- a1    : receiver
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(a1,
-                 a2,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3130,9 +2987,8 @@
   int receiver_count = receiver_maps->length();
   __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
+        eq, a2, Operand(receiver_maps->at(current)));
   }
 
   __ bind(&miss);
@@ -3140,14 +2996,14 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3161,16 +3017,11 @@
   __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
 
   // Check that the name has not changed.
-  __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+  __ Branch(&miss, ne, a1, Operand(name));
 
   // a3 is used as a scratch register. a1 and a2 keep their values if a jump
   // the miss label is generated.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     a2, a1, a3,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
   __ bind(&miss);
 
   __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
@@ -3178,11 +3029,12 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3190,29 +3042,25 @@
   //  -- ra    : return address
   //  -- a3    : scratch
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(a2,
-                 a3,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+  __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3225,10 +3073,17 @@
 
   int receiver_count = receiver_maps->length();
   __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map(receiver_maps->at(current));
-    Handle<Code> code(handler_ics->at(current));
-    __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+  for (int i = 0; i < receiver_count; ++i) {
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
+          a3, Operand(receiver_maps->at(i)));
+    } else {
+      Label next_map;
+      __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
+      __ li(a3, Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -3236,11 +3091,12 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
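CompileStorePolymorphic above gains a third list: for entries whose transitioned map is non-null, the stub first materializes the new map (in a3) and then tail-calls the handler, so a store that needs an elements-kind transition (for instance FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS) is handled inline. The dispatch reduces to something like this sketch (PolymorphicEntry, HandlerStub, and Dispatch are invented names standing in for the compiled stubs):

    #include <cstddef>
    #include <vector>

    struct Map;
    typedef void (*HandlerStub)(Map* transition);  // NULL: no transition

    struct PolymorphicEntry {
      Map* receiver_map;
      Map* transitioned_map;  // may be NULL
      HandlerStub handler;
    };

    // Returns false on a miss (no map matched the receiver's map).
    bool Dispatch(const std::vector<PolymorphicEntry>& entries, Map* map) {
      for (size_t i = 0; i < entries.size(); ++i) {
        if (entries[i].receiver_map != map) continue;  // try the next map
        entries[i].handler(entries[i].transitioned_map);
        return true;
      }
      return false;  // jump to the generic miss stub
    }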
 
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // a0    : argc
   // a1    : constructor
   // ra    : return address
@@ -3263,8 +3119,7 @@
   // a1: constructor function
   // t7: undefined
   __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-  __ And(t0, a2, Operand(kSmiTagMask));
-  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+  __ JumpIfSmi(a2, &generic_stub_call);
   __ GetObjectType(a2, a3, t0);
   __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
 
@@ -3285,12 +3140,7 @@
   // a2: initial map
   // t7: undefined
   __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(a3,
-                        t4,
-                        t5,
-                        t6,
-                        &generic_stub_call,
-                        SIZE_IN_WORDS);
+  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
 
   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3325,7 +3175,7 @@
   // t7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -3457,6 +3307,7 @@
 
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -3553,6 +3404,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3795,9 +3647,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
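The external-array hunks below stop untagging the key up front (the SmiUntag is deleted) and fold the untag into each address computation: with a 32-bit smi being value << 1, the tagged key is already index * 2, so byte elements use srl(key, 1), 16-bit elements use the key directly, 32-bit elements use sll(key, 1), and doubles use sll(key, 2). The bounds check likewise compares the tagged key directly against the smi-tagged length, which is safe because tagging preserves order. The identities the shifts rely on:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t index = 7;
      int32_t key = index << 1;  // smi-tagged key (tag bit 0 is 0)

      assert((key >> 1) == index * 1);  // 1-byte elements: srl(key, 1)
      assert( key       == index * 2);  // 2-byte elements: key is the offset
      assert((key << 1) == index * 4);  // 4-byte elements: sll(key, 1)
      assert((key << 2) == index * 8);  // 8-byte elements: sll(key, 2)
      return 0;
    }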
 
@@ -3828,7 +3680,6 @@
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check that the index is in range.
-  __ SmiUntag(t0, key);
   __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3836,7 +3687,6 @@
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // a3: external array.
-  // t0: key (integer).
 
   if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
@@ -3848,7 +3698,6 @@
   __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
   // a3: base pointer of external storage.
-  // t0: key (integer).
   // t1: value (integer).
 
   switch (elements_kind) {
@@ -3865,33 +3714,36 @@
       __ mov(v0, t1);  // Value is in range 0..255.
       __ bind(&done);
       __ mov(t1, v0);
-      __ addu(t8, a3, t0);
+
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       }
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ addu(t8, a3, t0);
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_SHORT_ELEMENTS:
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ sll(t8, t0, 1);
-      __ addu(t8, a3, t8);
+      __ addu(t8, a3, key);
       __ sh(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t8, t0, 2);
+      __ sll(t8, key, 1);
       __ addu(t8, a3, t8);
       __ sw(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
       // Perform int-to-float conversion and store to memory.
+      __ SmiUntag(t0, key);
       StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t8, t0, 3);
+      __ sll(t8, key, 2);
       __ addu(a3, a3, t8);
       // a3: effective address of the double element
       FloatingPointHelper::Destination destination;
@@ -3913,6 +3765,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3921,12 +3774,11 @@
   }
 
   // Entry registers are intact, a0 holds the value which is the return value.
-  __ mov(v0, value);
+  __ mov(v0, a0);
   __ Ret();
 
   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
-    // t0: index (integer).
     __ bind(&check_heap_number);
     __ GetObjectType(value, t1, t2);
     __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3934,7 +3786,6 @@
     __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
     // a3: base pointer of external storage.
-    // t0: key (integer).
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
@@ -3947,11 +3798,11 @@
 
       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         __ cvt_s_d(f0, f0);
-        __ sll(t8, t0, 2);
+        __ sll(t8, key, 1);
         __ addu(t8, a3, t8);
         __ swc1(f0, MemOperand(t8, 0));
       } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-        __ sll(t8, t0, 3);
+        __ sll(t8, key, 2);
         __ addu(t8, a3, t8);
         __ sdc1(f0, MemOperand(t8, 0));
       } else {
@@ -3960,18 +3811,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -3979,6 +3830,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3989,7 +3841,7 @@
 
       // Entry registers are intact, a0 holds the value
       // which is the return value.
-      __ mov(v0, value);
+      __ mov(v0, a0);
       __ Ret();
     } else {
       // FPU is not available, do manual conversions.
@@ -4044,13 +3896,13 @@
         __ or_(t3, t7, t6);
 
         __ bind(&done);
-        __ sll(t9, a1, 2);
+        __ sll(t9, key, 1);
         __ addu(t9, a2, t9);
         __ sw(t3, MemOperand(t9, 0));
 
         // Entry registers are intact, a0 holds the value which is the return
         // value.
-        __ mov(v0, value);
+        __ mov(v0, a0);
         __ Ret();
 
         __ bind(&nan_or_infinity_or_zero);
@@ -4068,6 +3920,7 @@
         // t8: effective address of destination element.
         __ sw(t4, MemOperand(t8, 0));
         __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ mov(v0, a0);
         __ Ret();
       } else {
         bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4130,18 +3983,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -4149,6 +4002,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4223,9 +4077,9 @@
   __ Ret();
 
   __ bind(&miss_force_generic);
-  Code* stub = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+  Handle<Code> stub =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(stub, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4298,8 +4152,10 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -4308,7 +4164,7 @@
   //  -- a3    : scratch
   //  -- t0    : scratch (elements)
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind;
 
   Register value_reg = a0;
   Register key_reg = a1;
@@ -4342,14 +4198,32 @@
   // Compare smis.
   __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
 
-  __ Addu(scratch,
-          elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(scratch3, scratch2, scratch);
-  __ sw(value_reg, MemOperand(scratch3));
-  __ RecordWrite(scratch, Operand(scratch2), receiver_reg , elements_reg);
-
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+    __ Addu(scratch,
+            elements_reg,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(scratch, scratch, scratch2);
+    __ sw(value_reg, MemOperand(scratch));
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ Addu(scratch,
+            elements_reg,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(scratch, scratch, scratch2);
+    __ sw(value_reg, MemOperand(scratch));
+    __ mov(receiver_reg, value_reg);
+    __ RecordWrite(elements_reg,  // Object.
+                   scratch,       // Address.
+                   receiver_reg,  // Value.
+                   kRAHasNotBeenSaved,
+                   kDontSaveFPRegs);
+  }
   // value_reg (a0) is preserved.
   // Done.
   __ Ret();
@@ -4358,6 +4232,10 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
 }
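
A note on the new transition_elements_kind exit above: a store of a non-smi
value into a FAST_SMI_ONLY_ELEMENTS array cannot be completed by this stub,
so it bails out to KeyedStoreIC_Miss, which transitions the receiver's map to
a more general elements kind. The lattice is the one encoded by
IsMoreGeneralElementsKindTransition later in this patch (objects-inl.h); a
minimal standalone restatement of it, with our own function name:

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,   // only smis (and holes)
      FAST_ELEMENTS,            // arbitrary tagged values
      FAST_DOUBLE_ELEMENTS      // unboxed doubles
    };

    // Mirrors IsMoreGeneralElementsKindTransition: smi-only may widen to
    // doubles or to generic elements, doubles may widen to generic
    // elements, and nothing ever narrows.
    bool IsMoreGeneral(ElementsKind from, ElementsKind to) {
      if (to == FAST_ELEMENTS) {
        return from == FAST_SMI_ONLY_ELEMENTS || from == FAST_DOUBLE_ELEMENTS;
      }
      return to == FAST_DOUBLE_ELEMENTS && from == FAST_SMI_ONLY_ELEMENTS;
    }

    int main() {
      assert(IsMoreGeneral(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
      assert(!IsMoreGeneral(FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS));
    }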
 
 
@@ -4375,15 +4253,15 @@
   //  -- t2    : scratch (exponent_reg)
   //  -- t3    : scratch4
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+  Label miss_force_generic, transition_elements_kind;
 
   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
-  Register scratch = a3;
-  Register elements_reg = t0;
-  Register mantissa_reg = t1;
-  Register exponent_reg = t2;
+  Register elements_reg = a3;
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
   Register scratch4 = t3;
 
   // This stub is meant to be tail-jumped to, the receiver must already
@@ -4395,90 +4273,25 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ lw(scratch,
+    __ lw(scratch1,
           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
 
-  // Handle smi values specially.
-  __ JumpIfSmi(value_reg, &smi_value);
+  __ StoreNumberToDoubleElements(value_reg,
+                                 key_reg,
+                                 receiver_reg,
+                                 elements_reg,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3,
+                                 scratch4,
+                                 &transition_elements_kind);
 
-  // Ensure that the object is a heap number
-  __ CheckMap(value_reg,
-              scratch,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
-  __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
-
-  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  __ bind(&have_double_value);
-  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  __ Addu(scratch, elements_reg, Operand(scratch4));
-  __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ sw(exponent_reg, FieldMemOperand(scratch, offset));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, value_reg);  // In delay slot.
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
-  __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
-  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
-
-  __ bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  uint64_t nan_int64 = BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-  __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
-  __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
-  __ jmp(&have_double_value);
-
-  __ bind(&smi_value);
-  __ Addu(scratch, elements_reg,
-          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  __ Addu(scratch, scratch, scratch4);
-  // scratch is now effective address of the double element
-
-  FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(FPU)) {
-    destination = FloatingPointHelper::kFPURegisters;
-  } else {
-    destination = FloatingPointHelper::kCoreRegisters;
-  }
-
-  Register untagged_value = receiver_reg;
-  __ SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(
-      masm,
-      untagged_value,
-      destination,
-      f0,
-      mantissa_reg,
-      exponent_reg,
-      scratch4,
-      f2);
-  if (destination == FloatingPointHelper::kFPURegisters) {
-    CpuFeatures::Scope scope(FPU);
-    __ sdc1(f0, MemOperand(scratch, 0));
-  } else {
-    __ sw(mantissa_reg, MemOperand(scratch, 0));
-    __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
-  }
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, value_reg);  // In delay slot.
 
@@ -4487,6 +4300,10 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
 }
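
A note on the addressing changes in the hunks above: the key register now
arrives smi-tagged (index << 1) rather than untagged, which is why
__ SmiUntag(t0, key) disappears, byte-wide cases untag with srl key, 1,
16-bit cases add the tagged key directly (the smi tag supplies the *2
scaling), and the 32- and 64-bit cases shift by one less than before. A
standalone sketch of the arithmetic; the helper name is ours, not V8's:

    #include <cassert>
    #include <cstdint>

    // With 32-bit smis, tagged_key == index << 1, so scaling by the
    // element size needs one shift fewer than the untagged index would.
    inline uintptr_t ElementAddress(uintptr_t base,
                                    uintptr_t tagged_key,     // index << 1
                                    int log2_element_size) {  // 0=byte..3=double
      uintptr_t offset = (log2_element_size == 0)
          ? (tagged_key >> 1)                        // untag: srl key, 1
          : (tagged_key << (log2_element_size - 1));
      return base + offset;
    }

    int main() {
      uintptr_t key = 5 << 1;                   // smi-tagged index 5
      assert(ElementAddress(0, key, 0) == 5);   // bytes: srl key, 1
      assert(ElementAddress(0, key, 1) == 10);  // shorts: base + key
      assert(ElementAddress(0, key, 2) == 20);  // ints: sll key, 1
      assert(ElementAddress(0, key, 3) == 40);  // doubles: sll key, 2
    }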
 
 
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index e3f3c48..0944b71 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -225,7 +225,7 @@
  */
 function Mirror(type) {
   this.type_ = type;
-};
+}
 
 
 Mirror.prototype.type = function() {
@@ -239,7 +239,7 @@
  */
 Mirror.prototype.isValue = function() {
   return this instanceof ValueMirror;
-}
+};
 
 
 /**
@@ -248,7 +248,7 @@
  */
 Mirror.prototype.isUndefined = function() {
   return this instanceof UndefinedMirror;
-}
+};
 
 
 /**
@@ -257,7 +257,7 @@
  */
 Mirror.prototype.isNull = function() {
   return this instanceof NullMirror;
-}
+};
 
 
 /**
@@ -266,7 +266,7 @@
  */
 Mirror.prototype.isBoolean = function() {
   return this instanceof BooleanMirror;
-}
+};
 
 
 /**
@@ -275,7 +275,7 @@
  */
 Mirror.prototype.isNumber = function() {
   return this instanceof NumberMirror;
-}
+};
 
 
 /**
@@ -284,7 +284,7 @@
  */
 Mirror.prototype.isString = function() {
   return this instanceof StringMirror;
-}
+};
 
 
 /**
@@ -293,7 +293,7 @@
  */
 Mirror.prototype.isObject = function() {
   return this instanceof ObjectMirror;
-}
+};
 
 
 /**
@@ -302,7 +302,7 @@
  */
 Mirror.prototype.isFunction = function() {
   return this instanceof FunctionMirror;
-}
+};
 
 
 /**
@@ -311,7 +311,7 @@
  */
 Mirror.prototype.isUnresolvedFunction = function() {
   return this instanceof UnresolvedFunctionMirror;
-}
+};
 
 
 /**
@@ -320,7 +320,7 @@
  */
 Mirror.prototype.isArray = function() {
   return this instanceof ArrayMirror;
-}
+};
 
 
 /**
@@ -329,7 +329,7 @@
  */
 Mirror.prototype.isDate = function() {
   return this instanceof DateMirror;
-}
+};
 
 
 /**
@@ -338,7 +338,7 @@
  */
 Mirror.prototype.isRegExp = function() {
   return this instanceof RegExpMirror;
-}
+};
 
 
 /**
@@ -347,7 +347,7 @@
  */
 Mirror.prototype.isError = function() {
   return this instanceof ErrorMirror;
-}
+};
 
 
 /**
@@ -356,7 +356,7 @@
  */
 Mirror.prototype.isProperty = function() {
   return this instanceof PropertyMirror;
-}
+};
 
 
 /**
@@ -365,7 +365,7 @@
  */
 Mirror.prototype.isFrame = function() {
   return this instanceof FrameMirror;
-}
+};
 
 
 /**
@@ -374,7 +374,7 @@
  */
 Mirror.prototype.isScript = function() {
   return this instanceof ScriptMirror;
-}
+};
 
 
 /**
@@ -383,7 +383,7 @@
  */
 Mirror.prototype.isContext = function() {
   return this instanceof ContextMirror;
-}
+};
 
 
 /**
@@ -392,7 +392,7 @@
  */
 Mirror.prototype.isScope = function() {
   return this instanceof ScopeMirror;
-}
+};
 
 
 /**
@@ -400,7 +400,7 @@
  */
 Mirror.prototype.allocateHandle_ = function() {
   this.handle_ = next_handle_++;
-}
+};
 
 
 /**
@@ -409,13 +409,13 @@
  */
 Mirror.prototype.allocateTransientHandle_ = function() {
   this.handle_ = next_transient_handle_--;
-}
+};
 
 
 Mirror.prototype.toText = function() {
   // Simple toText which is used when there is no specialization in subclass.
   return "#<" + this.constructor.name + ">";
-}
+};
 
 
 /**
@@ -480,7 +480,7 @@
 
 UndefinedMirror.prototype.toText = function() {
   return 'undefined';
-}
+};
 
 
 /**
@@ -496,7 +496,7 @@
 
 NullMirror.prototype.toText = function() {
   return 'null';
-}
+};
 
 
 /**
@@ -513,7 +513,7 @@
 
 BooleanMirror.prototype.toText = function() {
   return this.value_ ? 'true' : 'false';
-}
+};
 
 
 /**
@@ -530,7 +530,7 @@
 
 NumberMirror.prototype.toText = function() {
   return %NumberToString(this.value_);
-}
+};
 
 
 /**
@@ -555,11 +555,11 @@
            '... (length: ' + this.length() + ')';
   }
   return this.value_;
-}
+};
 
 StringMirror.prototype.toText = function() {
   return this.getTruncatedValue(kMaxProtocolStringLength);
-}
+};
 
 
 /**
@@ -898,7 +898,7 @@
 
 FunctionMirror.prototype.toText = function() {
   return this.source();
-}
+};
 
 
 /**
@@ -951,7 +951,7 @@
 
 UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
   return [];
-}
+};
 
 
 /**
@@ -971,7 +971,8 @@
 };
 
 
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
+                                                            opt_to_index) {
   var from_index = opt_from_index || 0;
   var to_index = opt_to_index || this.length() - 1;
   if (from_index > to_index) return new Array();
@@ -987,7 +988,7 @@
     values[i - from_index] = value;
   }
   return values;
-}
+};
 
 
 /**
@@ -1005,7 +1006,7 @@
 DateMirror.prototype.toText = function() {
   var s = JSON.stringify(this.value_);
   return s.substring(1, s.length - 1);  // cut quotes
-}
+};
 
 
 /**
@@ -1059,7 +1060,7 @@
 RegExpMirror.prototype.toText = function() {
   // Simple toText which is used when there is no specialization in subclass.
   return "/" + this.source() + "/";
-}
+};
 
 
 /**
@@ -1087,12 +1088,12 @@
   // Use the same text representation as in messages.js.
   var str;
   try {
-    str = %_CallFunction(this.value_, builtins.errorToString);
+    str = %_CallFunction(this.value_, builtins.ErrorToString);
   } catch (e) {
     str = '#<Error>';
   }
   return str;
-}
+};
 
 
 /**
@@ -1110,7 +1111,7 @@
   this.value_ = details[0];
   this.details_ = details[1];
   if (details.length > 2) {
-    this.exception_ = details[2]
+    this.exception_ = details[2];
     this.getter_ = details[3];
     this.setter_ = details[4];
   }
@@ -1120,22 +1121,22 @@
 
 PropertyMirror.prototype.isReadOnly = function() {
   return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-}
+};
 
 
 PropertyMirror.prototype.isEnum = function() {
   return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-}
+};
 
 
 PropertyMirror.prototype.canDelete = function() {
   return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-}
+};
 
 
 PropertyMirror.prototype.name = function() {
   return this.name_;
-}
+};
 
 
 PropertyMirror.prototype.isIndexed = function() {
@@ -1145,12 +1146,12 @@
     }
   }
   return true;
-}
+};
 
 
 PropertyMirror.prototype.value = function() {
   return MakeMirror(this.value_, false);
-}
+};
 
 
 /**
@@ -1159,22 +1160,22 @@
  */
 PropertyMirror.prototype.isException = function() {
   return this.exception_ ? true : false;
-}
+};
 
 
 PropertyMirror.prototype.attributes = function() {
   return %DebugPropertyAttributesFromDetails(this.details_);
-}
+};
 
 
 PropertyMirror.prototype.propertyType = function() {
   return %DebugPropertyTypeFromDetails(this.details_);
-}
+};
 
 
 PropertyMirror.prototype.insertionIndex = function() {
   return %DebugPropertyIndexFromDetails(this.details_);
-}
+};
 
 
 /**
@@ -1183,7 +1184,7 @@
  */
 PropertyMirror.prototype.hasGetter = function() {
   return this.getter_ ? true : false;
-}
+};
 
 
 /**
@@ -1192,7 +1193,7 @@
  */
 PropertyMirror.prototype.hasSetter = function() {
   return this.setter_ ? true : false;
-}
+};
 
 
 /**
@@ -1206,7 +1207,7 @@
   } else {
     return GetUndefinedMirror();
   }
-}
+};
 
 
 /**
@@ -1220,7 +1221,7 @@
   } else {
     return GetUndefinedMirror();
   }
-}
+};
 
 
 /**
@@ -1233,7 +1234,7 @@
   return (this.propertyType() == PropertyType.Interceptor) ||
          ((this.propertyType() == PropertyType.Callbacks) &&
           !this.hasGetter() && !this.hasSetter());
-}
+};
 
 
 const kFrameDetailsFrameIdIndex = 0;
@@ -1284,63 +1285,63 @@
 FrameDetails.prototype.frameId = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsFrameIdIndex];
-}
+};
 
 
 FrameDetails.prototype.receiver = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsReceiverIndex];
-}
+};
 
 
 FrameDetails.prototype.func = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsFunctionIndex];
-}
+};
 
 
 FrameDetails.prototype.isConstructCall = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsConstructCallIndex];
-}
+};
 
 
 FrameDetails.prototype.isAtReturn = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsAtReturnIndex];
-}
+};
 
 
 FrameDetails.prototype.isDebuggerFrame = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagDebuggerFrameMask;
   return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-}
+};
 
 
 FrameDetails.prototype.isOptimizedFrame = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagOptimizedFrameMask;
   return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-}
+};
 
 
 FrameDetails.prototype.isInlinedFrame = function() {
   return this.inlinedFrameIndex() > 0;
-}
+};
 
 
 FrameDetails.prototype.inlinedFrameIndex = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagInlinedFrameIndexMask;
-  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2
-}
+  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
+};
 
 
 FrameDetails.prototype.argumentCount = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsArgumentCountIndex];
-}
+};
 
 
 FrameDetails.prototype.argumentName = function(index) {
@@ -1348,9 +1349,9 @@
   if (index >= 0 && index < this.argumentCount()) {
     return this.details_[kFrameDetailsFirstDynamicIndex +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsNameIndex]
+                         kFrameDetailsNameIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.argumentValue = function(index) {
@@ -1358,45 +1359,45 @@
   if (index >= 0 && index < this.argumentCount()) {
     return this.details_[kFrameDetailsFirstDynamicIndex +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsValueIndex]
+                         kFrameDetailsValueIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.localCount = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsLocalCountIndex];
-}
+};
 
 
 FrameDetails.prototype.sourcePosition = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsSourcePositionIndex];
-}
+};
 
 
 FrameDetails.prototype.localName = function(index) {
   %CheckExecutionState(this.break_id_);
   if (index >= 0 && index < this.localCount()) {
     var locals_offset = kFrameDetailsFirstDynamicIndex +
-                        this.argumentCount() * kFrameDetailsNameValueSize
+                        this.argumentCount() * kFrameDetailsNameValueSize;
     return this.details_[locals_offset +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsNameIndex]
+                         kFrameDetailsNameIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.localValue = function(index) {
   %CheckExecutionState(this.break_id_);
   if (index >= 0 && index < this.localCount()) {
     var locals_offset = kFrameDetailsFirstDynamicIndex +
-                        this.argumentCount() * kFrameDetailsNameValueSize
+                        this.argumentCount() * kFrameDetailsNameValueSize;
     return this.details_[locals_offset +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsValueIndex]
+                         kFrameDetailsValueIndex];
   }
-}
+};
 
 
 FrameDetails.prototype.returnValue = function() {
@@ -1407,12 +1408,12 @@
   if (this.details_[kFrameDetailsAtReturnIndex]) {
     return this.details_[return_value_offset];
   }
-}
+};
 
 
 FrameDetails.prototype.scopeCount = function() {
   return %GetScopeCount(this.break_id_, this.frameId());
-}
+};
 
 
 /**
@@ -1575,7 +1576,8 @@
 };
 
 
-FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
+FrameMirror.prototype.evaluate = function(source, disable_break,
+                                          opt_context_object) {
   var result = %DebugEvaluate(this.break_id_,
                               this.details_.frameId(),
                               this.details_.inlinedFrameIndex(),
@@ -1599,7 +1601,8 @@
     result += '[debugger]';
   } else {
     // If the receiver has a className which is 'global' don't display it.
-    var display_receiver = !receiver.className || receiver.className() != 'global';
+    var display_receiver =
+      !receiver.className || (receiver.className() != 'global');
     if (display_receiver) {
       result += receiver.toText();
     }
@@ -1661,7 +1664,7 @@
   }
 
   return result;
-}
+};
 
 
 FrameMirror.prototype.sourceAndPositionText = function() {
@@ -1693,13 +1696,13 @@
   }
 
   return result;
-}
+};
 
 
 FrameMirror.prototype.localsText = function() {
   // Format local variables.
   var result = '';
-  var locals_count = this.localCount()
+  var locals_count = this.localCount();
   if (locals_count > 0) {
     for (var i = 0; i < locals_count; ++i) {
       result += '      var ';
@@ -1711,7 +1714,7 @@
   }
 
   return result;
-}
+};
 
 
 FrameMirror.prototype.toText = function(opt_locals) {
@@ -1726,7 +1729,7 @@
     result += this.localsText();
   }
   return result;
-}
+};
 
 
 const kScopeDetailsTypeIndex = 0;
@@ -1744,13 +1747,13 @@
 ScopeDetails.prototype.type = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kScopeDetailsTypeIndex];
-}
+};
 
 
 ScopeDetails.prototype.object = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kScopeDetailsObjectIndex];
-}
+};
 
 
 /**
@@ -1862,12 +1865,12 @@
 ScriptMirror.prototype.locationFromPosition = function(
     position, include_resource_offset) {
   return this.script_.locationFromPosition(position, include_resource_offset);
-}
+};
 
 
 ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
   return this.script_.sourceSlice(opt_from_line, opt_to_line);
-}
+};
 
 
 ScriptMirror.prototype.context = function() {
@@ -1907,7 +1910,7 @@
   }
   result += ')';
   return result;
-}
+};
 
 
 /**
@@ -1965,7 +1968,7 @@
  */
 JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
   return this.serialize_(mirror, true, true);
-}
+};
 
 
 /**
@@ -1978,7 +1981,7 @@
 JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
   var json = this.serialize_(mirror, false, true);
   return json;
-}
+};
 
 
 /**
@@ -2000,17 +2003,17 @@
   }
 
   return content;
-}
+};
 
 
 JSONProtocolSerializer.prototype.includeSource_ = function() {
   return this.options_ && this.options_.includeSource;
-}
+};
 
 
 JSONProtocolSerializer.prototype.inlineRefs_ = function() {
   return this.options_ && this.options_.inlineRefs;
-}
+};
 
 
 JSONProtocolSerializer.prototype.maxStringLength_ = function() {
@@ -2019,7 +2022,7 @@
     return kMaxProtocolStringLength;
   }
   return this.options_.maxStringLength;
-}
+};
 
 
 JSONProtocolSerializer.prototype.add_ = function(mirror) {
@@ -2032,7 +2035,7 @@
 
   // Add the mirror to the list of mirrors to be serialized.
   this.mirrors_.push(mirror);
-}
+};
 
 
 /**
@@ -2139,7 +2142,7 @@
       break;
 
     case PROPERTY_TYPE:
-      throw new Error('PropertyMirror cannot be serialized independeltly')
+      throw new Error('PropertyMirror cannot be serialized independently');
       break;
 
     case FRAME_TYPE:
@@ -2179,7 +2182,7 @@
           mirror.evalFromScript()) {
         content.evalFromScript =
             this.serializeReference(mirror.evalFromScript());
-        var evalFromLocation = mirror.evalFromLocation()
+        var evalFromLocation = mirror.evalFromLocation();
         if (evalFromLocation) {
           content.evalFromLocation = { line: evalFromLocation.line,
                                        column: evalFromLocation.column };
@@ -2203,7 +2206,7 @@
 
   // Create and return the JSON string.
   return content;
-}
+};
 
 
 /**
@@ -2278,7 +2281,7 @@
     }
   }
   content.properties = p;
-}
+};
 
 
 /**
@@ -2342,7 +2345,7 @@
     result.ref = propertyValue.handle();
   }
   return result;
-}
+};
 
 
 JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
@@ -2362,7 +2365,7 @@
   var x = new Array(mirror.argumentCount());
   for (var i = 0; i < mirror.argumentCount(); i++) {
     var arg = {};
-    var argument_name = mirror.argumentName(i)
+    var argument_name = mirror.argumentName(i);
     if (argument_name) {
       arg.name = argument_name;
     }
@@ -2392,7 +2395,7 @@
       index: i
     });
   }
-}
+};
 
 
 JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
@@ -2402,7 +2405,7 @@
   content.object = this.inlineRefs_() ?
                    this.serializeValue(mirror.scopeObject()) :
                    this.serializeReference(mirror.scopeObject());
-}
+};
 
 
 /**
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index a791dbb..bc0c2fc 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -312,7 +312,7 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index e9ca6c0..64bda94 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -94,6 +94,9 @@
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayVerify();
       break;
+    case FREE_SPACE_TYPE:
+      FreeSpace::cast(this)->FreeSpaceVerify();
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
       break;
@@ -153,6 +156,12 @@
     case JS_ARRAY_TYPE:
       JSArray::cast(this)->JSArrayVerify();
       break;
+    case JS_SET_TYPE:
+      JSSet::cast(this)->JSSetVerify();
+      break;
+    case JS_MAP_TYPE:
+      JSMap::cast(this)->JSMapVerify();
+      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapVerify();
       break;
@@ -207,6 +216,11 @@
 }
 
 
+void FreeSpace::FreeSpaceVerify() {
+  ASSERT(IsFreeSpace());
+}
+
+
 void ExternalPixelArray::ExternalPixelArrayVerify() {
   ASSERT(IsExternalPixelArray());
 }
@@ -255,12 +269,18 @@
 void JSObject::JSObjectVerify() {
   VerifyHeapPointer(properties());
   VerifyHeapPointer(elements());
+
+  if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+    ASSERT(this->elements()->IsFixedArray());
+    ASSERT(this->elements()->length() >= 2);
+  }
+
   if (HasFastProperties()) {
     CHECK_EQ(map()->unused_property_fields(),
              (map()->inobject_properties() + properties()->length() -
               map()->NextFreePropertyIndex()));
   }
-  ASSERT_EQ(map()->has_fast_elements(),
+  ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()),
             (elements()->map() == GetHeap()->fixed_array_map() ||
              elements()->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());
@@ -322,7 +342,8 @@
       double value = get_scalar(i);
       ASSERT(!isnan(value) ||
              (BitCast<uint64_t>(value) ==
-              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
+              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+             ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
     }
   }
 }
@@ -387,6 +408,7 @@
   CHECK(IsJSFunction());
   VerifyObjectField(kPrototypeOrInitialMapOffset);
   VerifyObjectField(kNextFunctionLinkOffset);
+  CHECK(code()->IsCode());
   CHECK(next_function_link()->IsUndefined() ||
         next_function_link()->IsJSFunction());
 }
@@ -446,9 +468,8 @@
   } else {
     ASSERT(number->IsSmi());
     int value = Smi::cast(number)->value();
-    // Hidden oddballs have negative smis.
-    const int kLeastHiddenOddballNumber = -4;
     ASSERT(value <= 1);
+    // Hidden oddballs have negative smis.
     ASSERT(value >= kLeastHiddenOddballNumber);
   }
 }
@@ -463,6 +484,7 @@
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
                   kCodeAlignment));
+  relocation_info()->Verify();
   Address last_gc_pc = NULL;
   for (RelocIterator it(this); !it.done(); it.next()) {
     it.rinfo()->Verify();
@@ -484,11 +506,27 @@
 }
 
 
+void JSSet::JSSetVerify() {
+  CHECK(IsJSSet());
+  JSObjectVerify();
+  VerifyHeapPointer(table());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
+void JSMap::JSMapVerify() {
+  CHECK(IsJSMap());
+  JSObjectVerify();
+  VerifyHeapPointer(table());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
+}
+
+
 void JSWeakMap::JSWeakMapVerify() {
   CHECK(IsJSWeakMap());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  ASSERT(table()->IsHashTable());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
 }
 
 
@@ -535,13 +573,14 @@
 
 
 void JSProxy::JSProxyVerify() {
-  ASSERT(IsJSProxy());
+  CHECK(IsJSProxy());
   VerifyPointer(handler());
+  ASSERT(hash()->IsSmi() || hash()->IsUndefined());
 }
 
 
 void JSFunctionProxy::JSFunctionProxyVerify() {
-  ASSERT(IsJSFunctionProxy());
+  CHECK(IsJSFunctionProxy());
   JSProxyVerify();
   VerifyPointer(call_trap());
   VerifyPointer(construct_trap());
@@ -700,7 +739,7 @@
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      SeededNumberDictionary* dict = element_dictionary();
+      NumberDictionary* dict = element_dictionary();
       info->number_of_slow_used_elements_ += dict->NumberOfElements();
       info->number_of_slow_unused_elements_ +=
           dict->Capacity() - dict->NumberOfElements();
diff --git a/src/objects-inl.h b/src/objects-inl.h
index e7b6a34..39d6e04 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -43,8 +43,11 @@
 #include "isolate.h"
 #include "property.h"
 #include "spaces.h"
+#include "store-buffer.h"
 #include "v8memory.h"
 
+#include "incremental-marking.h"
+
 namespace v8 {
 namespace internal {
 
@@ -64,6 +67,13 @@
 }
 
 
+#define TYPE_CHECKER(type, instancetype)                                \
+  bool Object::Is##type() {                                             \
+  return Object::IsHeapObject() &&                                      \
+      HeapObject::cast(this)->map()->instance_type() == instancetype;   \
+  }
+
+
 #define CAST_ACCESSOR(type)                     \
   type* type::cast(Object* object) {            \
     ASSERT(object->Is##type());                 \
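
For reference, the TYPE_CHECKER macro introduced above expands to the same
shape as the hand-written predicates it replaces throughout this file; for
instance, TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE) yields:

    bool Object::IsHeapNumber() {
      return Object::IsHeapObject() &&
          HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
    }
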
@@ -80,16 +90,7 @@
   type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
   void holder::set_##name(type* value, WriteBarrierMode mode) {         \
     WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);           \
-  }
-
-
-// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
-#define ACCESSORS_GCSAFE(holder, name, type, offset)                    \
-  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
-  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
-    WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode);                \
+    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);    \
   }
 
 
@@ -118,6 +119,23 @@
   }
 
 
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+                                         ElementsKind to_kind) {
+  if (to_kind == FAST_ELEMENTS) {
+    return from_kind == FAST_SMI_ONLY_ELEMENTS ||
+        from_kind == FAST_DOUBLE_ELEMENTS;
+  } else {
+    return to_kind == FAST_DOUBLE_ELEMENTS &&
+        from_kind == FAST_SMI_ONLY_ELEMENTS;
+  }
+}
+
+
+bool Object::IsFixedArrayBase() {
+  return IsFixedArray() || IsFixedDoubleArray();
+}
+
+
 bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
   // There is a constraint on the object; check.
   if (!this->IsJSObject()) return false;
@@ -147,12 +165,15 @@
 }
 
 
-bool Object::IsHeapNumber() {
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
+bool Object::NonFailureIsHeapObject() {
+  ASSERT(!this->IsFailure());
+  return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
 }
 
 
+TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+
+
 bool Object::IsString() {
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
@@ -165,6 +186,13 @@
 }
 
 
+bool Object::IsSpecFunction() {
+  if (!Object::IsHeapObject()) return false;
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
+}
+
+
 bool Object::IsSymbol() {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -396,17 +424,18 @@
 }
 
 
-bool Object::IsByteArray() {
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
+
+
+bool Object::IsFiller() {
+  if (!Object::IsHeapObject()) return false;
+  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+  return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
 }
 
 
-bool Object::IsExternalPixelArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-          EXTERNAL_PIXEL_ARRAY_TYPE;
-}
+TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
 
 
 bool Object::IsExternalArray() {
@@ -419,60 +448,14 @@
 }
 
 
-bool Object::IsExternalByteArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedByteArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalShortArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedShortArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalIntArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalUnsignedIntArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalFloatArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_FLOAT_ARRAY_TYPE;
-}
-
-
-bool Object::IsExternalDoubleArray() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() ==
-      EXTERNAL_DOUBLE_ARRAY_TYPE;
-}
+TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
+TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
+TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
 
 
 bool MaybeObject::IsFailure() {
@@ -509,59 +492,34 @@
 
 
 bool Object::IsJSReceiver() {
+  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   return IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
 }
 
 
 bool Object::IsJSObject() {
-  return IsJSReceiver() && !IsJSProxy();
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  return IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
 }
 
 
 bool Object::IsJSProxy() {
-  return Object::IsHeapObject() &&
-     (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
-      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
+  if (!Object::IsHeapObject()) return false;
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
 }
 
 
-bool Object::IsJSFunctionProxy() {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
-}
-
-
-bool Object::IsJSWeakMap() {
-  return Object::IsJSObject() &&
-      HeapObject::cast(this)->map()->instance_type() == JS_WEAK_MAP_TYPE;
-}
-
-
-bool Object::IsJSContextExtensionObject() {
-  return IsHeapObject()
-      && (HeapObject::cast(this)->map()->instance_type() ==
-          JS_CONTEXT_EXTENSION_OBJECT_TYPE);
-}
-
-
-bool Object::IsMap() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
-}
-
-
-bool Object::IsFixedArray() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
-}
-
-
-bool Object::IsFixedDoubleArray() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() ==
-          FIXED_DOUBLE_ARRAY_TYPE;
-}
+TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
+TYPE_CHECKER(Map, MAP_TYPE)
+TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
 
 
 bool Object::IsDescriptorArray() {
@@ -617,17 +575,14 @@
 }
 
 
-bool Object::IsSerializedScopeInfo() {
+bool Object::IsScopeInfo() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->serialized_scope_info_map();
+      HeapObject::cast(this)->GetHeap()->scope_info_map();
 }
 
 
-bool Object::IsJSFunction() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
-}
+TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
 
 
 template <> inline bool Is<JSFunction>(Object* obj) {
@@ -635,44 +590,12 @@
 }
 
 
-bool Object::IsCode() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
-}
-
-
-bool Object::IsOddball() {
-  ASSERT(HEAP->is_safe_to_read_maps());
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
-}
-
-
-bool Object::IsJSGlobalPropertyCell() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type()
-      == JS_GLOBAL_PROPERTY_CELL_TYPE;
-}
-
-
-bool Object::IsSharedFunctionInfo() {
-  return Object::IsHeapObject() &&
-      (HeapObject::cast(this)->map()->instance_type() ==
-       SHARED_FUNCTION_INFO_TYPE);
-}
-
-
-bool Object::IsJSValue() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
-}
-
-
-bool Object::IsJSMessageObject() {
-  return Object::IsHeapObject()
-      && (HeapObject::cast(this)->map()->instance_type() ==
-          JS_MESSAGE_OBJECT_TYPE);
-}
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(Oddball, ODDBALL_TYPE)
+TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
+TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
 
 
 bool Object::IsStringWrapper() {
@@ -680,10 +603,7 @@
 }
 
 
-bool Object::IsForeign() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
-}
+TYPE_CHECKER(Foreign, FOREIGN_TYPE)
 
 
 bool Object::IsBoolean() {
@@ -692,16 +612,8 @@
 }
 
 
-bool Object::IsJSArray() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
-}
-
-
-bool Object::IsJSRegExp() {
-  return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
-}
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
 
 
 template <> inline bool Is<JSArray>(Object* obj) {
@@ -738,7 +650,10 @@
     return false;
   }
 #ifdef DEBUG
-  reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
+  if (FLAG_verify_heap) {
+    reinterpret_cast<JSFunctionResultCache*>(this)->
+        JSFunctionResultCacheVerify();
+  }
 #endif
   return true;
 }
@@ -750,7 +665,9 @@
     return false;
   }
 #ifdef DEBUG
-  reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+  if (FLAG_verify_heap) {
+    reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+  }
 #endif
   return true;
 }
@@ -799,18 +716,8 @@
 }
 
 
-bool Object::IsJSGlobalObject() {
-  return IsHeapObject() &&
-      (HeapObject::cast(this)->map()->instance_type() ==
-       JS_GLOBAL_OBJECT_TYPE);
-}
-
-
-bool Object::IsJSBuiltinsObject() {
-  return IsHeapObject() &&
-      (HeapObject::cast(this)->map()->instance_type() ==
-       JS_BUILTINS_OBJECT_TYPE);
-}
+TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
+TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
 
 
 bool Object::IsUndetectableObject() {
@@ -939,21 +846,20 @@
 #define WRITE_FIELD(p, offset, value) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
 
-// TODO(isolates): Pass heap in to these macros.
-#define WRITE_BARRIER(object, offset) \
-  object->GetHeap()->RecordWrite(object->address(), offset);
+#define WRITE_BARRIER(heap, object, offset, value)                      \
+  heap->incremental_marking()->RecordWrite(                             \
+      object, HeapObject::RawField(object, offset), value);             \
+  if (heap->InNewSpace(value)) {                                        \
+    heap->RecordWrite(object->address(), offset);                       \
+  }
 
-// CONDITIONAL_WRITE_BARRIER must be issued after the actual
-// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
-  if (mode == UPDATE_WRITE_BARRIER) { \
-    heap->RecordWrite(object->address(), offset); \
-  } else { \
-    ASSERT(mode == SKIP_WRITE_BARRIER); \
-    ASSERT(heap->InNewSpace(object) || \
-           !heap->InNewSpace(READ_FIELD(object, offset)) || \
-           Page::FromAddress(object->address())->           \
-               IsRegionDirty(object->address() + offset));  \
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode)    \
+  if (mode == UPDATE_WRITE_BARRIER) {                                   \
+    heap->incremental_marking()->RecordWrite(                           \
+      object, HeapObject::RawField(object, offset), value);             \
+    if (heap->InNewSpace(value)) {                                      \
+      heap->RecordWrite(object->address(), offset);                     \
+    }                                                                   \
   }
 
 #ifndef V8_TARGET_ARCH_MIPS
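
The rewritten WRITE_BARRIER and CONDITIONAL_WRITE_BARRIER above combine two
barriers: an incremental-marking barrier, which must observe every recorded
pointer store so the marker never misses an edge out of an already-marked
object, and the store-buffer barrier, which only needs to see old-to-new
pointers. A sketch of what one expansion does; the free-function form is
ours, while the two RecordWrite hooks are the ones named in the macros:

    void WriteBarrierSketch(Heap* heap, HeapObject* object, int offset,
                            Object* value) {
      // Incremental marking must see every recorded pointer update.
      heap->incremental_marking()->RecordWrite(
          object, HeapObject::RawField(object, offset), value);
      // The store buffer only tracks old-to-new pointers; stores of
      // old-space values can skip it.
      if (heap->InNewSpace(value)) {
        heap->RecordWrite(object->address(), offset);
      }
    }
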
@@ -974,7 +880,6 @@
   #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
 #endif  // V8_TARGET_ARCH_MIPS
 
-
 #ifndef V8_TARGET_ARCH_MIPS
   #define WRITE_DOUBLE_FIELD(p, offset, value) \
     (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
@@ -1169,91 +1074,6 @@
 }
 
 
-bool MapWord::IsMarked() {
-  return (value_ & kMarkingMask) == 0;
-}
-
-
-void MapWord::SetMark() {
-  value_ &= ~kMarkingMask;
-}
-
-
-void MapWord::ClearMark() {
-  value_ |= kMarkingMask;
-}
-
-
-bool MapWord::IsOverflowed() {
-  return (value_ & kOverflowMask) != 0;
-}
-
-
-void MapWord::SetOverflow() {
-  value_ |= kOverflowMask;
-}
-
-
-void MapWord::ClearOverflow() {
-  value_ &= ~kOverflowMask;
-}
-
-
-MapWord MapWord::EncodeAddress(Address map_address, int offset) {
-  // Offset is the distance in live bytes from the first live object in the
-  // same page. The offset between two objects in the same page should not
-  // exceed the object area size of a page.
-  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-
-  uintptr_t compact_offset = offset >> kObjectAlignmentBits;
-  ASSERT(compact_offset < (1 << kForwardingOffsetBits));
-
-  Page* map_page = Page::FromAddress(map_address);
-  ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
-
-  uintptr_t map_page_offset =
-      map_page->Offset(map_address) >> kMapAlignmentBits;
-
-  uintptr_t encoding =
-      (compact_offset << kForwardingOffsetShift) |
-      (map_page_offset << kMapPageOffsetShift) |
-      (map_page->mc_page_index << kMapPageIndexShift);
-  return MapWord(encoding);
-}
-
-
-Address MapWord::DecodeMapAddress(MapSpace* map_space) {
-  int map_page_index =
-      static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
-  ASSERT_MAP_PAGE_INDEX(map_page_index);
-
-  int map_page_offset = static_cast<int>(
-      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
-      kMapAlignmentBits);
-
-  return (map_space->PageAddress(map_page_index) + map_page_offset);
-}
-
-
-int MapWord::DecodeOffset() {
-  // The offset field is represented in the kForwardingOffsetBits
-  // most-significant bits.
-  uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
-  ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
-  return static_cast<int>(offset);
-}
-
-
-MapWord MapWord::FromEncodedAddress(Address address) {
-  return MapWord(reinterpret_cast<uintptr_t>(address));
-}
-
-
-Address MapWord::ToEncodedAddress() {
-  return reinterpret_cast<Address>(value_);
-}
-
-
 #ifdef DEBUG
 void HeapObject::VerifyObjectField(int offset) {
   VerifyPointer(READ_FIELD(this, offset));
@@ -1266,12 +1086,11 @@
 
 
 Heap* HeapObject::GetHeap() {
-  // During GC, the map pointer in HeapObject is used in various ways that
-  // prevent us from retrieving Heap from the map.
-  // Assert that we are not in GC, implement GC code in a way that it doesn't
-  // pull heap from the map.
-  ASSERT(HEAP->is_safe_to_read_maps());
-  return map()->heap();
+  Heap* heap =
+      MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
 }
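
HeapObject::GetHeap above no longer goes through the map word, which is
unstable during GC; it recovers the owning Heap from the MemoryChunk header
instead. This works because chunks are aligned allocations, so the header is
reachable by masking any interior address. A sketch of the assumed
mechanics; the 1MB alignment below is illustrative only, not V8's actual
definition:

    #include <cstdint>

    struct MemoryChunk;  // V8-internal chunk header; layout elided here

    // Masking the low bits of an interior pointer yields the chunk header.
    inline MemoryChunk* ChunkFromAddress(uintptr_t addr) {
      const uintptr_t kAlignment = uintptr_t(1) << 20;  // illustrative
      return reinterpret_cast<MemoryChunk*>(addr & ~(kAlignment - 1));
    }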
 
 
@@ -1287,6 +1106,17 @@
 
 void HeapObject::set_map(Map* value) {
   set_map_word(MapWord::FromMap(value));
+  if (value != NULL) {
+    // TODO(1600) We are passing NULL as a slot because maps can never be on
+    // an evacuation candidate.
+    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+  }
+}
+
+
+// Unsafe accessor omitting write barrier.
+void HeapObject::set_map_unsafe(Map* value) {
+  set_map_word(MapWord::FromMap(value));
 }
 
 
@@ -1329,47 +1159,6 @@
 }
 
 
-bool HeapObject::IsMarked() {
-  return map_word().IsMarked();
-}
-
-
-void HeapObject::SetMark() {
-  ASSERT(!IsMarked());
-  MapWord first_word = map_word();
-  first_word.SetMark();
-  set_map_word(first_word);
-}
-
-
-void HeapObject::ClearMark() {
-  ASSERT(IsMarked());
-  MapWord first_word = map_word();
-  first_word.ClearMark();
-  set_map_word(first_word);
-}
-
-
-bool HeapObject::IsOverflowed() {
-  return map_word().IsOverflowed();
-}
-
-
-void HeapObject::SetOverflow() {
-  MapWord first_word = map_word();
-  first_word.SetOverflow();
-  set_map_word(first_word);
-}
-
-
-void HeapObject::ClearOverflow() {
-  ASSERT(IsOverflowed());
-  MapWord first_word = map_word();
-  first_word.ClearOverflow();
-  set_map_word(first_word);
-}
-
-
 double HeapNumber::value() {
   return READ_DOUBLE_FIELD(this, kValueOffset);
 }
@@ -1396,20 +1185,80 @@
 
 FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
-  ASSERT(array->HasValidElements());
   return static_cast<FixedArrayBase*>(array);
 }
 
+void JSObject::ValidateSmiOnlyElements() {
+#if DEBUG
+  if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+    Heap* heap = GetHeap();
+    // Don't use elements, since integrity checks will fail if there
+    // are filler pointers in the array.
+    FixedArray* fixed_array =
+        reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
+    Map* map = fixed_array->map();
+    // Arrays that have been shifted in place can't be verified.
+    if (map != heap->raw_unchecked_one_pointer_filler_map() &&
+        map != heap->raw_unchecked_two_pointer_filler_map() &&
+        map != heap->free_space_map()) {
+      for (int i = 0; i < fixed_array->length(); i++) {
+        Object* current = fixed_array->get(i);
+        ASSERT(current->IsSmi() || current == heap->the_hole_value());
+      }
+    }
+  }
+#endif
+}
+
+
+MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
+#if DEBUG
+  ValidateSmiOnlyElements();
+#endif
+  if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+    Object* obj;
+    MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    set_map(Map::cast(obj));
+  }
+  return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
+                                                uint32_t count) {
+  if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+    for (uint32_t i = 0; i < count; ++i) {
+      Object* current = *objects++;
+      if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
+        return EnsureCanContainNonSmiElements();
+      }
+    }
+  }
+  return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
+  Object** objects = reinterpret_cast<Object**>(
+      FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
+  return EnsureCanContainElements(objects, elements->length());
+}
+
 
 void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
-  ASSERT(map()->has_fast_elements() ==
+  ASSERT((map()->has_fast_elements() ||
+          map()->has_fast_smi_only_elements()) ==
          (value->map() == GetHeap()->fixed_array_map() ||
           value->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_double_elements() ==
          value->IsFixedDoubleArray());
   ASSERT(value->HasValidElements());
+#ifdef DEBUG
+  ValidateSmiOnlyElements();
+#endif
   WRITE_FIELD(this, kElementsOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
 }
 
 
@@ -1420,7 +1269,7 @@
 
 
 void JSObject::initialize_elements() {
-  ASSERT(map()->has_fast_elements());
+  ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
   ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
   WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
 }
@@ -1428,9 +1277,11 @@
 
 MaybeObject* JSObject::ResetElements() {
   Object* obj;
-  { MaybeObject* maybe_obj = map()->GetFastElementsMap();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  ElementsKind elements_kind = FLAG_smi_only_arrays
+      ? FAST_SMI_ONLY_ELEMENTS
+      : FAST_ELEMENTS;
+  MaybeObject* maybe_obj = GetElementsTransitionMap(elements_kind);
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   set_map(Map::cast(obj));
   initialize_elements();
   return this;
@@ -1442,12 +1293,12 @@
 
 
 byte Oddball::kind() {
-  return READ_BYTE_FIELD(this, kKindOffset);
+  return Smi::cast(READ_FIELD(this, kKindOffset))->value();
 }
 
 
 void Oddball::set_kind(byte value) {
-  WRITE_BYTE_FIELD(this, kKindOffset, value);
+  WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
 }
 
 
@@ -1460,6 +1311,8 @@
   // The write barrier is not used for global property cells.
   ASSERT(!val->IsJSGlobalPropertyCell());
   WRITE_FIELD(this, kValueOffset, val);
+  GetHeap()->incremental_marking()->RecordWrite(
+      this, HeapObject::RawField(this, kValueOffset), val);
 }
 
 
@@ -1528,7 +1381,17 @@
   // to adjust the index here.
   int offset = GetHeaderSize() + (kPointerSize * index);
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(this, offset);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+
+void JSObject::SetInternalField(int index, Smi* value) {
+  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  // Internal objects do follow immediately after the header, whereas in-object
+  // properties are at the end of the object. Therefore there is no need
+  // to adjust the index here.
+  int offset = GetHeaderSize() + (kPointerSize * index);
+  WRITE_FIELD(this, offset, value);
 }
 
 
@@ -1554,7 +1417,7 @@
   if (index < 0) {
     int offset = map()->instance_size() + (index * kPointerSize);
     WRITE_FIELD(this, offset, value);
-    WRITE_BARRIER(this, offset);
+    WRITE_BARRIER(GetHeap(), this, offset, value);
   } else {
     ASSERT(index < properties()->length());
     properties()->set(index, value);
@@ -1588,16 +1451,32 @@
   ASSERT(index < 0);
   int offset = map()->instance_size() + (index * kPointerSize);
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
   return value;
 }
 
 
 
-void JSObject::InitializeBody(int object_size, Object* value) {
-  ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
-  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
-    WRITE_FIELD(this, offset, value);
+void JSObject::InitializeBody(Map* map,
+                              Object* pre_allocated_value,
+                              Object* filler_value) {
+  ASSERT(!filler_value->IsHeapObject() ||
+         !GetHeap()->InNewSpace(filler_value));
+  ASSERT(!pre_allocated_value->IsHeapObject() ||
+         !GetHeap()->InNewSpace(pre_allocated_value));
+  int size = map->instance_size();
+  int offset = kHeaderSize;
+  if (filler_value != pre_allocated_value) {
+    int pre_allocated = map->pre_allocated_property_fields();
+    ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
+    for (int i = 0; i < pre_allocated; i++) {
+      WRITE_FIELD(this, offset, pre_allocated_value);
+      offset += kPointerSize;
+    }
+  }
+  while (offset < size) {
+    WRITE_FIELD(this, offset, filler_value);
+    offset += kPointerSize;
   }
 }
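+// A sketch of the layout handled above (sizes illustrative): for a map with
+// instance_size == kHeaderSize + 4 * kPointerSize and two pre-allocated
+// property fields, the body is initialized as
+//
+//   [kHeaderSize + 0 * kPointerSize]  pre_allocated_value
+//   [kHeaderSize + 1 * kPointerSize]  pre_allocated_value
+//   [kHeaderSize + 2 * kPointerSize]  filler_value
+//   [kHeaderSize + 3 * kPointerSize]  filler_value
+//
+// When both values are identical, the pre-allocated loop is skipped and the
+// whole body is filled in a single pass.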
 
@@ -1683,7 +1562,7 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(this, offset);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
 }
 
 
@@ -1772,7 +1651,7 @@
 
 void FixedDoubleArray::Initialize(FixedArray* from) {
   int old_length = from->length();
-  ASSERT(old_length < length());
+  ASSERT(old_length <= length());
   for (int i = 0; i < old_length; i++) {
     Object* hole_or_object = from->get(i);
     if (hole_or_object->IsTheHole()) {
@@ -1789,7 +1668,7 @@
 }
 
 
-void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
+void FixedDoubleArray::Initialize(NumberDictionary* from) {
   int offset = kHeaderSize;
   for (int current = 0; current < length(); ++current) {
     WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
@@ -1806,7 +1685,9 @@
 
 
 WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
-  if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+  Heap* heap = GetHeap();
+  if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
+  if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
   return UPDATE_WRITE_BARRIER;
 }
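+// While the incremental marker runs, the usual new-space shortcut is unsound:
+// skipping the barrier could hide a pointer stored into an already-marked
+// (black) object, so marking forces the barrier on. Decision sketch:
+//
+//   marking active        -> UPDATE_WRITE_BARRIER
+//   object in new space   -> SKIP_WRITE_BARRIER
+//   otherwise             -> UPDATE_WRITE_BARRIER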
 
@@ -1818,11 +1699,13 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
 }
 
 
-void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
+void FixedArray::NoWriteBarrierSet(FixedArray* array,
+                                   int index,
+                                   Object* value) {
   ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
   ASSERT(index >= 0 && index < array->length());
   ASSERT(!HEAP->InNewSpace(value));
@@ -1879,7 +1762,7 @@
                                WriteBarrierMode mode) {
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
 }
 
 
@@ -1914,10 +1797,12 @@
 }
 
 
-void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
+void DescriptorArray::NoWriteBarrierSwap(FixedArray* array,
+                                         int first,
+                                         int second) {
   Object* tmp = array->get(first);
-  fast_set(array, first, array->get(second));
-  fast_set(array, second, tmp);
+  NoWriteBarrierSet(array, first, array->get(second));
+  NoWriteBarrierSet(array, second, tmp);
 }
 
 
@@ -1992,19 +1877,17 @@
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
   ASSERT(GetType(descriptor_number) == CALLBACKS);
   Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->address());
+  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
 }
 
 
 bool DescriptorArray::IsProperty(int descriptor_number) {
-  return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
+  return IsRealProperty(GetType(descriptor_number));
 }
 
 
 bool DescriptorArray::IsTransition(int descriptor_number) {
-  PropertyType t = GetType(descriptor_number);
-  return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
-      t == ELEMENTS_TRANSITION;
+  return IsTransitionType(GetType(descriptor_number));
 }
 
 
@@ -2025,7 +1908,9 @@
 }
 
 
-void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
+void DescriptorArray::Set(int descriptor_number,
+                          Descriptor* desc,
+                          const WhitenessWitness&) {
   // Range check.
   ASSERT(descriptor_number < number_of_descriptors());
 
@@ -2033,26 +1918,53 @@
   ASSERT(!HEAP->InNewSpace(desc->GetKey()));
   ASSERT(!HEAP->InNewSpace(desc->GetValue()));
 
-  fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
+  NoWriteBarrierSet(this,
+                    ToKeyIndex(descriptor_number),
+                    desc->GetKey());
   FixedArray* content_array = GetContentArray();
-  fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
-  fast_set(content_array, ToDetailsIndex(descriptor_number),
-           desc->GetDetails().AsSmi());
+  NoWriteBarrierSet(content_array,
+                    ToValueIndex(descriptor_number),
+                    desc->GetValue());
+  NoWriteBarrierSet(content_array,
+                    ToDetailsIndex(descriptor_number),
+                    desc->GetDetails().AsSmi());
 }
 
 
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
+void DescriptorArray::CopyFrom(int index,
+                               DescriptorArray* src,
+                               int src_index,
+                               const WhitenessWitness& witness) {
   Descriptor desc;
   src->Get(src_index, &desc);
-  Set(index, &desc);
+  Set(index, &desc, witness);
 }
 
 
-void DescriptorArray::Swap(int first, int second) {
-  fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
+void DescriptorArray::NoWriteBarrierSwapDescriptors(int first, int second) {
+  NoWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
   FixedArray* content_array = GetContentArray();
-  fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
-  fast_swap(content_array, ToDetailsIndex(first),  ToDetailsIndex(second));
+  NoWriteBarrierSwap(content_array,
+                     ToValueIndex(first),
+                     ToValueIndex(second));
+  NoWriteBarrierSwap(content_array,
+                     ToDetailsIndex(first),
+                     ToDetailsIndex(second));
+}
+
+
+DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
+    : marking_(array->GetHeap()->incremental_marking()) {
+  marking_->EnterNoMarkingScope();
+  if (array->number_of_descriptors() > 0) {
+    ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
+    ASSERT(Marking::Color(array->GetContentArray()) == Marking::WHITE_OBJECT);
+  }
+}
+
+
+DescriptorArray::WhitenessWitness::~WhitenessWitness() {
+  marking_->LeaveNoMarkingScope();
 }
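+// Usage sketch (illustrative): a witness proves that the descriptor array
+// and its content array are still white, so descriptor stores may skip write
+// barriers while it is alive:
+//
+//   DescriptorArray::WhitenessWitness witness(descriptors);
+//   descriptors->Set(i, &desc, witness);  // NoWriteBarrierSet underneath
+//   // leaving scope re-enables marking via ~WhitenessWitness()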
 
 
@@ -2077,14 +1989,13 @@
 template<typename Shape, typename Key>
 int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
   uint32_t capacity = Capacity();
-  uint32_t entry = FirstProbe(HashTable<Shape, Key>::Hash(key), capacity);
+  uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
   uint32_t count = 1;
   // EnsureCapacity will guarantee the hash table is never full.
   while (true) {
     Object* element = KeyAt(entry);
-    // Empty entry.
-    if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
-    if (element != isolate->heap()->raw_unchecked_null_value() &&
+    if (element == isolate->heap()->undefined_value()) break;  // Empty entry.
+    if (element != isolate->heap()->the_hole_value() &&
         Shape::IsMatch(key, element)) return entry;
     entry = NextProbe(entry, count++, capacity);
   }
@@ -2092,14 +2003,14 @@
 }
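+// Probing sketch: with a power-of-two capacity, FirstProbe masks the hash
+// into range and NextProbe advances by 1, 2, 3, ... slots (triangular
+// numbers), which visits every slot. Assuming V8's usual definitions:
+//
+//   uint32_t entry = hash & (capacity - 1);      // FirstProbe
+//   entry = (entry + count++) & (capacity - 1);  // NextProbe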
 
 
-bool SeededNumberDictionary::requires_slow_elements() {
+bool NumberDictionary::requires_slow_elements() {
   Object* max_index_object = get(kMaxNumberKeyIndex);
   if (!max_index_object->IsSmi()) return false;
   return 0 !=
       (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
 }
 
-uint32_t SeededNumberDictionary::max_number_key() {
+uint32_t NumberDictionary::max_number_key() {
   ASSERT(!requires_slow_elements());
   Object* max_index_object = get(kMaxNumberKeyIndex);
   if (!max_index_object->IsSmi()) return 0;
@@ -2107,7 +2018,7 @@
   return value >> kRequiresSlowElementsTagSize;
 }
 
-void SeededNumberDictionary::set_requires_slow_elements() {
+void NumberDictionary::set_requires_slow_elements() {
   set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
 }
 
@@ -2124,6 +2035,7 @@
 CAST_ACCESSOR(SymbolTable)
 CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(ScopeInfo)
 CAST_ACCESSOR(CompilationCacheTable)
 CAST_ACCESSOR(CodeCacheHashTable)
 CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
@@ -2156,9 +2068,12 @@
 CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(JSProxy)
 CAST_ACCESSOR(JSFunctionProxy)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSMap)
 CAST_ACCESSOR(JSWeakMap)
 CAST_ACCESSOR(Foreign)
 CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(ExternalArray)
 CAST_ACCESSOR(ExternalByteArray)
 CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -2185,6 +2100,7 @@
 
 
 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
 
@@ -2341,7 +2257,7 @@
 
 
 void SlicedString::set_parent(String* parent) {
-  ASSERT(parent->IsSeqString());
+  ASSERT(parent->IsSeqString() || parent->IsExternalString());
   WRITE_FIELD(this, kParentOffset, parent);
 }
 
@@ -2361,7 +2277,7 @@
 
 void ConsString::set_first(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kFirstOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
 }
 
 
@@ -2377,29 +2293,83 @@
 
 void ConsString::set_second(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kSecondOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
 }
 
 
-ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+bool ExternalString::is_short() {
+  InstanceType type = map()->instance_type();
+  return (type & kShortExternalStringMask) == kShortExternalStringTag;
+}
+
+
+const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
+void ExternalAsciiString::update_data_cache() {
+  if (is_short()) return;
+  const char** data_field =
+      reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+  *data_field = resource()->data();
+}
+
+
 void ExternalAsciiString::set_resource(
-    ExternalAsciiString::Resource* resource) {
-  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+    const ExternalAsciiString::Resource* resource) {
+  *reinterpret_cast<const Resource**>(
+      FIELD_ADDR(this, kResourceOffset)) = resource;
+  if (resource != NULL) update_data_cache();
 }
 
 
-ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+const char* ExternalAsciiString::GetChars() {
+  return resource()->data();
+}
+
+
+uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return GetChars()[index];
+}
+
+
+const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
+void ExternalTwoByteString::update_data_cache() {
+  if (is_short()) return;
+  const uint16_t** data_field =
+      reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
+  *data_field = resource()->data();
+}
+
+
 void ExternalTwoByteString::set_resource(
-    ExternalTwoByteString::Resource* resource) {
-  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+    const ExternalTwoByteString::Resource* resource) {
+  *reinterpret_cast<const Resource**>(
+      FIELD_ADDR(this, kResourceOffset)) = resource;
+  if (resource != NULL) update_data_cache();
+}
+
+
+const uint16_t* ExternalTwoByteString::GetChars() {
+  return resource()->data();
+}
+
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return GetChars()[index];
+}
+
+
+const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
+      unsigned start) {
+  return GetChars() + start;
 }
 
 
@@ -2699,6 +2669,9 @@
   if (instance_type == BYTE_ARRAY_TYPE) {
     return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
   }
+  if (instance_type == FREE_SPACE_TYPE) {
+    return reinterpret_cast<FreeSpace*>(this)->size();
+  }
   if (instance_type == STRING_TYPE) {
     return SeqTwoByteString::SizeFor(
         reinterpret_cast<SeqTwoByteString*>(this)->length());
@@ -2860,12 +2833,6 @@
 }
 
 
-FixedArray* Map::unchecked_prototype_transitions() {
-  return reinterpret_cast<FixedArray*>(
-      READ_FIELD(this, kPrototypeTransitionsOffset));
-}
-
-
 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
 }
@@ -2937,6 +2904,19 @@
 }
 
 
+bool Code::is_pregenerated() {
+  return kind() == STUB && IsPregeneratedField::decode(flags());
+}
+
+
+void Code::set_is_pregenerated(bool value) {
+  ASSERT(kind() == STUB);
+  Flags f = flags();
+  f = static_cast<Flags>(IsPregeneratedField::update(f, value));
+  set_flags(f);
+}
+
+
 bool Code::optimizable() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -2979,6 +2959,21 @@
 }
 
 
+bool Code::is_compiled_optimizable() {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  return FullCodeFlagsIsCompiledOptimizable::decode(flags);
+}
+
+
+void Code::set_compiled_optimizable(bool value) {
+  ASSERT(kind() == FUNCTION);
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
+  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+}
+
+
 int Code::allow_osr_at_loop_nesting_level() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
@@ -3102,6 +3097,19 @@
   WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
 }
 
+
+bool Code::has_function_cache() {
+  ASSERT(kind() == STUB);
+  return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+}
+
+
+void Code::set_has_function_cache(bool flag) {
+  ASSERT(kind() == STUB);
+  WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+}
+
+
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -3187,48 +3195,6 @@
 }
 
 
-Isolate* Map::isolate() {
-  return heap()->isolate();
-}
-
-
-Heap* Map::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Heap* Code::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Isolate* Code::isolate() {
-  return heap()->isolate();
-}
-
-
-Heap* JSGlobalPropertyCell::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Isolate* JSGlobalPropertyCell::isolate() {
-  return heap()->isolate();
-}
-
-
 Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
   return HeapObject::
       FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3243,46 +3209,7 @@
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
   ASSERT(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
-}
-
-
-MaybeObject* Map::GetFastElementsMap() {
-  if (has_fast_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(FAST_ELEMENTS);
-  isolate()->counters()->map_to_fast_elements()->Increment();
-  return new_map;
-}
-
-
-MaybeObject* Map::GetFastDoubleElementsMap() {
-  if (has_fast_double_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
-  isolate()->counters()->map_to_fast_double_elements()->Increment();
-  return new_map;
-}
-
-
-MaybeObject* Map::GetSlowElementsMap() {
-  if (!has_fast_elements() && !has_fast_double_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(DICTIONARY_ELEMENTS);
-  isolate()->counters()->map_to_slow_elements()->Increment();
-  return new_map;
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
 }
 
 
@@ -3317,7 +3244,8 @@
                                    WriteBarrierMode mode) {
   Object* object = READ_FIELD(this,
                               kInstanceDescriptorsOrBitField3Offset);
-  if (value == isolate()->heap()->empty_descriptor_array()) {
+  Heap* heap = GetHeap();
+  if (value == heap->empty_descriptor_array()) {
     clear_instance_descriptors();
     return;
   } else {
@@ -3330,10 +3258,8 @@
   }
   ASSERT(!is_shared());
   WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(),
-                            this,
-                            kInstanceDescriptorsOrBitField3Offset,
-                            mode);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
 }
 
 
@@ -3362,14 +3288,22 @@
 }
 
 
+FixedArray* Map::unchecked_prototype_transitions() {
+  return reinterpret_cast<FixedArray*>(
+      READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
-                 kNextFunctionLinkOffset)
+ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction,
+          next_function_link,
+          Object,
+          kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3458,8 +3392,8 @@
 #endif
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -3608,8 +3542,39 @@
 }
 
 
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, strict_mode,
-               kStrictModeFunction)
+LanguageMode SharedFunctionInfo::language_mode() {
+  int hints = compiler_hints();
+  if (BooleanBit::get(hints, kExtendedModeFunction)) {
+    ASSERT(BooleanBit::get(hints, kStrictModeFunction));
+    return EXTENDED_MODE;
+  }
+  return BooleanBit::get(hints, kStrictModeFunction)
+      ? STRICT_MODE : CLASSIC_MODE;
+}
+
+
+void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
+  // We only allow language mode transitions that set the same language mode
+  // again or go up in the chain:
+  //   CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
+  ASSERT(this->language_mode() == CLASSIC_MODE ||
+         this->language_mode() == language_mode ||
+         language_mode == EXTENDED_MODE);
+  int hints = compiler_hints();
+  hints = BooleanBit::set(
+      hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
+  hints = BooleanBit::set(
+      hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
+  set_compiler_hints(hints);
+}
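+// Encoding sketch: the two compiler-hint bits cover the three modes, and
+// (strict = 0, extended = 1) is deliberately unrepresentable:
+//
+//   kStrictModeFunction  kExtendedModeFunction  language_mode()
+//          0                     0              CLASSIC_MODE
+//          1                     0              STRICT_MODE
+//          1                     1              EXTENDED_MODE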
+
+
+bool SharedFunctionInfo::is_classic_mode() {
+  return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
+}
+
+BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
+            kExtendedModeFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
                name_should_print_as_anonymous,
@@ -3665,20 +3630,23 @@
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kCodeOffset, value);
-  ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
+  CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
 }
 
 
-SerializedScopeInfo* SharedFunctionInfo::scope_info() {
-  return reinterpret_cast<SerializedScopeInfo*>(
-      READ_FIELD(this, kScopeInfoOffset));
+ScopeInfo* SharedFunctionInfo::scope_info() {
+  return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
 }
 
 
-void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
+void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
                                         WriteBarrierMode mode) {
   WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(),
+                            this,
+                            kScopeInfoOffset,
+                            reinterpret_cast<Object*>(value),
+                            mode);
 }
 
 
@@ -3726,8 +3694,8 @@
 
 
 void SharedFunctionInfo::set_code_age(int code_age) {
-  set_compiler_hints(compiler_hints() |
-                     ((code_age & kCodeAgeMask) << kCodeAgeShift));
+  int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
+  set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
 }
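+// Worked example of the fix (bit values illustrative): the old version only
+// OR-ed the new age in, so moving from age 3 (0b011) to age 4 (0b100) left
+// 0b111 in the field. Masking the age bits out first stores 0b100 as
+// intended.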
 
 
@@ -3775,10 +3743,13 @@
 
 
 void JSFunction::set_code(Code* value) {
-  // Skip the write barrier because code is never in new space.
   ASSERT(!HEAP->InNewSpace(value));
   Address entry = value->entry();
   WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+  GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
+      this,
+      HeapObject::RawField(this, kCodeEntryOffset),
+      value);
 }
 
 
@@ -3818,7 +3789,7 @@
 void JSFunction::set_context(Object* value) {
   ASSERT(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
-  WRITE_BARRIER(this, kContextOffset);
+  WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
 }
 
 ACCESSORS(JSFunction, prototype_or_initial_map, Object,
@@ -3877,7 +3848,36 @@
 }
 
 
+FixedArray* JSFunction::literals() {
+  ASSERT(!shared()->bound());
+  return literals_or_bindings();
+}
+
+
+void JSFunction::set_literals(FixedArray* literals) {
+  ASSERT(!shared()->bound());
+  set_literals_or_bindings(literals);
+}
+
+
+FixedArray* JSFunction::function_bindings() {
+  ASSERT(shared()->bound());
+  return literals_or_bindings();
+}
+
+
+void JSFunction::set_function_bindings(FixedArray* bindings) {
+  ASSERT(shared()->bound());
+  // A bound function's literals field may be initialized to the empty fixed
+  // array before the bindings are set.
+  ASSERT(bindings == GetHeap()->empty_fixed_array() ||
+         bindings->map() == GetHeap()->fixed_cow_array_map());
+  set_literals_or_bindings(bindings);
+}
+
+
 int JSFunction::NumberOfLiterals() {
+  ASSERT(!shared()->bound());
   return literals()->length();
 }
 
@@ -3892,7 +3892,7 @@
                                               Object* value) {
   ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
-  WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
+  WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
 }
 
 
@@ -3911,6 +3911,7 @@
 
 
 ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+ACCESSORS(JSProxy, hash, Object, kHashOffset)
 ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
 ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
 
@@ -3923,22 +3924,19 @@
 }
 
 
-ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
-ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSSet, table, Object, kTableOffset)
+ACCESSORS(JSMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, next, Object, kNextOffset)
 
 
-ObjectHashTable* JSWeakMap::unchecked_table() {
-  return reinterpret_cast<ObjectHashTable*>(READ_FIELD(this, kTableOffset));
+Address Foreign::foreign_address() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
 }
 
 
-Address Foreign::address() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
-}
-
-
-void Foreign::set_address(Address value) {
-  WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
+void Foreign::set_foreign_address(Address value) {
+  WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
 }
 
 
@@ -3970,6 +3968,7 @@
 
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
+ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
 ACCESSORS(Code, next_code_flushing_candidate,
           Object, kNextCodeFlushingCandidateOffset)
@@ -4016,9 +4015,8 @@
 }
 
 
-bool Code::contains(byte* pc) {
-  return (instruction_start() <= pc) &&
-      (pc <= instruction_start() + instruction_size());
+bool Code::contains(byte* inner_pointer) {
+  return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
 }
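+// The containment test now spans the whole Code object rather than just its
+// instructions: for a code object at address A with Size() == S, any
+// inner_pointer in [A, A + S] is accepted (useful when mapping arbitrary
+// inner pointers, e.g. return addresses, back to their Code object).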
 
 
@@ -4097,6 +4095,7 @@
   if (value->IsSmi()) {
     fa->set_unchecked(index, Smi::cast(value));
   } else {
+    // We only do this during GC, so we don't need to notify the write barrier.
     fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
   }
 }
@@ -4104,15 +4103,22 @@
 
 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
-  ASSERT((kind == FAST_ELEMENTS &&
-          (elements()->map() == GetHeap()->fixed_array_map() ||
-           elements()->map() == GetHeap()->fixed_cow_array_map())) ||
-         (kind == FAST_DOUBLE_ELEMENTS &&
-          elements()->IsFixedDoubleArray()) ||
-         (kind == DICTIONARY_ELEMENTS &&
-          elements()->IsFixedArray() &&
-          elements()->IsDictionary()) ||
-         (kind > DICTIONARY_ELEMENTS));
+#ifdef DEBUG
+  FixedArrayBase* fixed_array =
+      reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
+  Map* map = fixed_array->map();
+  ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
+          (map == GetHeap()->fixed_array_map() ||
+           map == GetHeap()->fixed_cow_array_map())) ||
+         (kind == FAST_DOUBLE_ELEMENTS &&
+          fixed_array->IsFixedDoubleArray()) ||
+         (kind == DICTIONARY_ELEMENTS &&
+          fixed_array->IsFixedArray() &&
+          fixed_array->IsDictionary()) ||
+         (kind > DICTIONARY_ELEMENTS));
+  ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+         (elements()->IsFixedArray() && elements()->length() >= 2));
+#endif
   return kind;
 }
 
@@ -4127,6 +4133,18 @@
 }
 
 
+bool JSObject::HasFastSmiOnlyElements() {
+  return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
+}
+
+
+bool JSObject::HasFastTypeElements() {
+  ElementsKind elements_kind = GetElementsKind();
+  return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+      elements_kind == FAST_ELEMENTS;
+}
+
+
 bool JSObject::HasFastDoubleElements() {
   return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
 }
@@ -4137,6 +4155,11 @@
 }
 
 
+bool JSObject::HasNonStrictArgumentsElements() {
+  return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+}
+
+
 bool JSObject::HasExternalArrayElements() {
   HeapObject* array = elements();
   ASSERT(array != NULL);
@@ -4188,7 +4211,7 @@
 
 
 MaybeObject* JSObject::EnsureWritableFastElements() {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastTypeElements());
   FixedArray* elems = FixedArray::cast(elements());
   Isolate* isolate = GetIsolate();
   if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4211,9 +4234,9 @@
 }
 
 
-SeededNumberDictionary* JSObject::element_dictionary() {
+NumberDictionary* JSObject::element_dictionary() {
   ASSERT(HasDictionaryElements());
-  return SeededNumberDictionary::cast(elements());
+  return NumberDictionary::cast(elements());
 }
 
 
@@ -4236,15 +4259,13 @@
 }
 
 
-StringHasher::StringHasher(int length, uint32_t seed)
+StringHasher::StringHasher(int length)
   : length_(length),
-    raw_running_hash_(seed),
+    raw_running_hash_(0),
     array_index_(0),
     is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
     is_first_char_(true),
-    is_valid_(true) {
-  ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
-}
+    is_valid_(true) { }
 
 
 bool StringHasher::has_trivial_hash() {
@@ -4296,7 +4317,7 @@
   result += (result << 3);
   result ^= (result >> 11);
   result += (result << 15);
-  if ((result & String::kHashBitMask) == 0) {
+  if (result == 0) {
     result = 27;
   }
   return result;
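+// (Finishing step of a Jenkins-style one-at-a-time hash; forcing a nonzero
+// result lets zero mean "hash not yet computed". Assuming the usual
+// per-character mixing, the whole scheme is roughly:
+//
+//   hash += c;          hash += hash << 10;  hash ^= hash >> 6;   // per char
+//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;  // finish
+// )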
@@ -4304,8 +4325,8 @@
 
 
 template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) {
-  StringHasher hasher(length, seed);
+uint32_t HashSequentialString(const schar* chars, int length) {
+  StringHasher hasher(length);
   if (!hasher.has_trivial_hash()) {
     int i;
     for (i = 0; hasher.is_array_index() && (i < length); i++) {
@@ -4366,44 +4387,18 @@
 }
 
 
-bool JSObject::HasHiddenPropertiesObject() {
-  ASSERT(!IsJSGlobalProxy());
-  return GetPropertyAttributePostInterceptor(this,
-                                             GetHeap()->hidden_symbol(),
-                                             false) != ABSENT;
+MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
+  return IsJSProxy()
+      ? JSProxy::cast(this)->GetIdentityHash(flag)
+      : JSObject::cast(this)->GetIdentityHash(flag);
 }
 
 
-Object* JSObject::GetHiddenPropertiesObject() {
-  ASSERT(!IsJSGlobalProxy());
-  PropertyAttributes attributes;
-  // You can't install a getter on a property indexed by the hidden symbol,
-  // so we can be sure that GetLocalPropertyPostInterceptor returns a real
-  // object.
-  Object* result =
-      GetLocalPropertyPostInterceptor(this,
-                                      GetHeap()->hidden_symbol(),
-                                      &attributes)->ToObjectUnchecked();
-  return result;
-}
-
-
-MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
-  ASSERT(!IsJSGlobalProxy());
-  return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
-                                    hidden_obj,
-                                    DONT_ENUM,
-                                    kNonStrictMode);
-}
-
-
-bool JSObject::HasHiddenProperties() {
-  return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
-}
-
-
-bool JSObject::HasElement(uint32_t index) {
-  return HasElementWithReceiver(this, index);
+bool JSReceiver::HasElement(uint32_t index) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->HasElementWithHandler(index);
+  }
+  return JSObject::cast(this)->HasElementWithReceiver(this, index);
 }
 
 
@@ -4466,7 +4461,7 @@
   WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
   FixedArray::set(index, key, mode);
   FixedArray::set(index+1, value, mode);
-  FixedArray::fast_set(this, index+2, details.AsSmi());
+  FixedArray::set(index+2, details.AsSmi());
 }
 
 
@@ -4476,27 +4471,16 @@
 }
 
 
-uint32_t UnseededNumberDictionaryShape::Hash(uint32_t key) {
-  return ComputeIntegerHash(key, 0);
+uint32_t NumberDictionaryShape::Hash(uint32_t key) {
+  return ComputeIntegerHash(key);
 }
 
 
-uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
-                                                      Object* other) {
+uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
   ASSERT(other->IsNumber());
-  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
+  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
 }
 
-uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
-  return ComputeIntegerHash(key, seed);
-}
-
-uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
-                                                          uint32_t seed,
-                                                          Object* other) {
-  ASSERT(other->IsNumber());
-  return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
-}
 
 MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
   return Isolate::Current()->heap()->NumberFromUint32(key);
@@ -4526,36 +4510,33 @@
 }
 
 
-bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
-  return key == JSObject::cast(other);
+template <int entrysize>
+bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+  return key->SameValue(other);
 }
 
 
-uint32_t ObjectHashTableShape::Hash(JSObject* key) {
-  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
-  ASSERT(!maybe_hash->IsFailure());
-  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+template <int entrysize>
+uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
+  MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+  return Smi::cast(maybe_hash->ToObjectChecked())->value();
 }
 
 
-uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
-  MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
-      JSObject::OMIT_CREATION);
-  ASSERT(!maybe_hash->IsFailure());
-  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+template <int entrysize>
+uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
+                                                        Object* other) {
+  MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
+  return Smi::cast(maybe_hash->ToObjectChecked())->value();
 }
 
 
-MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
+template <int entrysize>
+MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) {
   return key;
 }
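+// With the generalized shape any object can key a table: lookups match with
+// SameValue and hash via Object::GetHash, so value-equal numbers and strings
+// behave as equal keys while other objects fall back to identity. Sketch,
+// assuming the usual Put/Lookup API:
+//
+//   table->Put(key, value);  // hashes via key->GetHash(...)
+//   table->Lookup(other);    // hit iff key->SameValue(other)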
 
 
-void ObjectHashTable::RemoveEntry(int entry) {
-  RemoveEntry(entry, GetHeap());
-}
-
-
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
   // Please note this function is used during marking:
@@ -4566,7 +4547,7 @@
 
 
 void JSArray::EnsureSize(int required_size) {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastTypeElements());
   FixedArray* elts = FixedArray::cast(elements());
   const int kArraySizeThatFitsComfortablyInNewSpace = 128;
   if (elts->length() < required_size) {
@@ -4584,13 +4565,17 @@
 
 
 void JSArray::set_length(Smi* length) {
+  // Don't need a write barrier for a Smi.
   set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
 }
 
 
-void JSArray::SetContent(FixedArray* storage) {
+MaybeObject* JSArray::SetContent(FixedArray* storage) {
+  MaybeObject* maybe_object = EnsureCanContainElements(storage);
+  if (maybe_object->IsFailure()) return maybe_object;
   set_length(Smi::FromInt(storage->length()));
   set_elements(storage);
+  return this;
 }
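+// SetContent can now fail (EnsureCanContainElements may need to transition
+// the elements kind and allocate), so callers use the standard MaybeObject
+// idiom, e.g.:
+//
+//   MaybeObject* maybe_result = array->SetContent(storage);
+//   Object* result;
+//   if (!maybe_result->ToObject(&result)) return maybe_result;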
 
 
@@ -4600,6 +4585,12 @@
 }
 
 
+MaybeObject* FixedDoubleArray::Copy() {
+  if (length() == 0) return this;
+  return GetHeap()->CopyFixedDoubleArray(this);
+}
+
+
 Relocatable::Relocatable(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   isolate_ = isolate;
@@ -4622,14 +4613,14 @@
 
 void Foreign::ForeignIterateBody(ObjectVisitor* v) {
   v->VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
 }
 
 
 template<typename StaticVisitor>
 void Foreign::ForeignIterateBody() {
   StaticVisitor::VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
 }
 
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 0398572..4b5d049 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -82,12 +82,18 @@
     case HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberPrint(out);
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
+      break;
     case FIXED_ARRAY_TYPE:
       FixedArray::cast(this)->FixedArrayPrint(out);
       break;
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayPrint(out);
       break;
+    case FREE_SPACE_TYPE:
+      FreeSpace::cast(this)->FreeSpacePrint(out);
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
       break;
@@ -189,6 +195,11 @@
 }
 
 
+void FreeSpace::FreeSpacePrint(FILE* out) {
+  PrintF(out, "free space, size %d", Size());
+}
+
+
 void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
   PrintF(out, "external pixel array");
 }
@@ -256,16 +267,37 @@
           descs->GetCallbacksObject(i)->ShortPrint(out);
           PrintF(out, " (callback)\n");
           break;
+        case ELEMENTS_TRANSITION: {
+          PrintF(out, "(elements transition to ");
+          Object* descriptor_contents = descs->GetValue(i);
+          if (descriptor_contents->IsMap()) {
+            Map* map = Map::cast(descriptor_contents);
+            PrintElementsKind(out, map->elements_kind());
+          } else {
+            FixedArray* map_array = FixedArray::cast(descriptor_contents);
+            for (int j = 0; j < map_array->length(); ++j) {
+              Map* map = Map::cast(map_array->get(j));
+              if (j != 0) {
+                PrintF(out, ", ");
+              }
+              PrintElementsKind(out, map->elements_kind());
+            }
+          }
+          PrintF(out, ")\n");
+          break;
+        }
         case MAP_TRANSITION:
-          PrintF(out, " (map transition)\n");
+          PrintF(out, "(map transition)\n");
           break;
         case CONSTANT_TRANSITION:
-          PrintF(out, " (constant transition)\n");
+          PrintF(out, "(constant transition)\n");
           break;
         case NULL_DESCRIPTOR:
-          PrintF(out, " (null descriptor)\n");
+          PrintF(out, "(null descriptor)\n");
           break;
-        default:
+        case NORMAL:  // only in slow mode
+        case HANDLER:  // only in lookup results, not in descriptors
+        case INTERCEPTOR:  // only in lookup results, not in descriptors
           UNREACHABLE();
           break;
       }
@@ -277,7 +309,10 @@
 
 
 void JSObject::PrintElements(FILE* out) {
-  switch (GetElementsKind()) {
+  // Don't call GetElementsKind; its validation code can cause the printer to
+  // fail when debugging.
+  switch (map()->elements_kind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
@@ -385,8 +420,13 @@
 
 void JSObject::JSObjectPrint(FILE* out) {
   PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
+  PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
+  // Don't call GetElementsKind; its validation code can cause the printer to
+  // fail when debugging.
+  PrintElementsKind(out, this->map()->elements_kind());
+  PrintF(out,
+         "]\n - prototype = %p\n",
+         reinterpret_cast<void*>(GetPrototype()));
   PrintF(out, " {\n");
   PrintProperties(out);
   PrintElements(out);
@@ -406,6 +446,9 @@
     case EXTERNAL_ASCII_SYMBOL_TYPE:
     case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
+    case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
+    case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
+    case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL";
     case ASCII_STRING_TYPE: return "ASCII_STRING";
     case STRING_TYPE: return "TWO_BYTE_STRING";
     case CONS_STRING_TYPE:
@@ -413,8 +456,12 @@
     case EXTERNAL_ASCII_STRING_TYPE:
     case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+    case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
+    case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING";
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
+    case FREE_SPACE_TYPE: return "FREE_SPACE";
     case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
     case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -458,7 +505,9 @@
   PrintF(out, " - type: %s\n", TypeToString(instance_type()));
   PrintF(out, " - instance size: %d\n", instance_size());
   PrintF(out, " - inobject properties: %d\n", inobject_properties());
-  PrintF(out, " - pre-allocated property fields: %d\n",
+  PrintF(out, " - elements kind: ");
+  PrintElementsKind(out, elements_kind());
+  PrintF(out, "\n - pre-allocated property fields: %d\n",
       pre_allocated_property_fields());
   PrintF(out, " - unused property fields: %d\n", unused_property_fields());
   if (is_hidden_prototype()) {
@@ -516,6 +565,16 @@
 }
 
 
+void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "FixedDoubleArray");
+  PrintF(out, " - length: %d", length());
+  for (int i = 0; i < length(); i++) {
+    PrintF(out, "\n  [%d]: %g", i, get_scalar(i));
+  }
+  PrintF(out, "\n");
+}
+
+
 void JSValue::JSValuePrint(FILE* out) {
   HeapObject::PrintHeader(out, "ValueObject");
   value()->Print(out);
@@ -587,6 +646,8 @@
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
   PrintF(out, " - handler = ");
   handler()->Print(out);
+  PrintF(out, " - hash = ");
+  hash()->Print(out);
   PrintF(out, "\n");
 }
 
@@ -607,7 +668,6 @@
 void JSWeakMap::JSWeakMapPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSWeakMap");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
   PrintF(out, " - table = ");
   table()->ShortPrint(out);
   PrintF(out, "\n");
@@ -707,7 +767,7 @@
 
 
 void Foreign::ForeignPrint(FILE* out) {
-  PrintF(out, "foreign address : %p", address());
+  PrintF(out, "foreign address : %p", foreign_address());
 }
 
 
@@ -802,10 +862,15 @@
 
 void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "ObjectTemplateInfo");
+  PrintF(out, " - tag: ");
+  tag()->ShortPrint(out);
+  PrintF(out, "\n - property_list: ");
+  property_list()->ShortPrint(out);
   PrintF(out, "\n - constructor: ");
   constructor()->ShortPrint(out);
   PrintF(out, "\n - internal_field_count: ");
   internal_field_count()->ShortPrint(out);
+  PrintF(out, "\n");
 }
 
 
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
new file mode 100644
index 0000000..12b044c
--- /dev/null
+++ b/src/objects-visiting-inl.h
@@ -0,0 +1,151 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_VISITING_INL_H_
+#define V8_OBJECTS_VISITING_INL_H_
+
+
+namespace v8 {
+namespace internal {
+
+template<typename StaticVisitor>
+void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
+  table_.Register(kVisitShortcutCandidate,
+                  &FixedBodyVisitor<StaticVisitor,
+                  ConsString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitConsString,
+                  &FixedBodyVisitor<StaticVisitor,
+                  ConsString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor,
+                  SlicedString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitFixedArray,
+                  &FlexibleBodyVisitor<StaticVisitor,
+                  FixedArray::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
+  table_.Register(kVisitGlobalContext,
+                  &FixedBodyVisitor<StaticVisitor,
+                  Context::ScavengeBodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitByteArray, &VisitByteArray);
+
+  table_.Register(kVisitSharedFunctionInfo,
+                  &FixedBodyVisitor<StaticVisitor,
+                  SharedFunctionInfo::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+  table_.Register(kVisitJSFunction,
+                  &JSObjectVisitor::
+                      template VisitSpecialized<JSFunction::kSize>);
+
+  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
+
+  table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
+
+  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
+
+  table_.template RegisterSpecializations<DataObjectVisitor,
+                                          kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor,
+                                          kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+  table_.template RegisterSpecializations<StructVisitor,
+                                          kVisitStruct,
+                                          kVisitStructGeneric>();
+}
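+// Dispatch sketch: each map carries a VisitorId, and table_ maps that id to
+// a callback, so visiting is one indexed call instead of a type switch:
+//
+//   int size = table_.GetVisitor(map)(map, obj);  // == IterateBody(map, obj)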
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the
+  // templated CodeIterateBody (below).  They should be kept in sync.
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kHandlerTableOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->Visit(v);
+  }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the
+  // non-templated CodeIterateBody (above).  They should be kept in sync.
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->template Visit<StaticVisitor>(heap);
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_INL_H_
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 0aa21dd..9ca102b 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -64,7 +64,7 @@
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject,
                                    kVisitDataObjectGeneric,
-                                   ExternalString::kSize);
+                                   instance_size);
     }
     UNREACHABLE();
   }
@@ -73,6 +73,9 @@
     case BYTE_ARRAY_TYPE:
       return kVisitByteArray;
 
+    case FREE_SPACE_TYPE:
+      return kVisitFreeSpace;
+
     case FIXED_ARRAY_TYPE:
       return kVisitFixedArray;
 
@@ -91,6 +94,16 @@
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       return kVisitPropertyCell;
 
+    case JS_SET_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSSet::kSize);
+
+    case JS_MAP_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSMap::kSize);
+
     case JS_WEAK_MAP_TYPE:
       return kVisitJSWeakMap;
 
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 4ce1bd0..26e79ae 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -30,22 +30,6 @@
 
 #include "allocation.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
 // Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -67,6 +51,7 @@
     kVisitSeqTwoByteString,
     kVisitShortcutCandidate,
     kVisitByteArray,
+    kVisitFreeSpace,
     kVisitFixedArray,
     kVisitFixedDoubleArray,
     kVisitGlobalContext,
@@ -150,7 +135,7 @@
            (base == kVisitJSObject));
     ASSERT(IsAligned(object_size, kPointerSize));
     ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
-    ASSERT(object_size < Page::kMaxHeapObjectSize);
+    ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
 
     const VisitorId specialization = static_cast<VisitorId>(
         base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
@@ -172,6 +157,10 @@
     }
   }
 
+  inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
+    return reinterpret_cast<Callback>(callbacks_[id]);
+  }
+
   inline Callback GetVisitor(Map* map) {
     return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
   }
@@ -236,7 +225,7 @@
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     int object_size = BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -247,7 +236,7 @@
   static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
     ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -261,7 +250,7 @@
  public:
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         BodyDescriptor::kEndOffset);
@@ -289,63 +278,7 @@
 template<typename StaticVisitor>
 class StaticNewSpaceVisitor : public StaticVisitorBase {
  public:
-  static void Initialize() {
-    table_.Register(kVisitShortcutCandidate,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      ConsString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitConsString,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      ConsString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitSlicedString,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      SlicedString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitFixedArray,
-                    &FlexibleBodyVisitor<StaticVisitor,
-                                         FixedArray::BodyDescriptor,
-                                         int>::Visit);
-
-    table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
-    table_.Register(kVisitGlobalContext,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      Context::ScavengeBodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitByteArray, &VisitByteArray);
-
-    table_.Register(kVisitSharedFunctionInfo,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      SharedFunctionInfo::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitJSWeakMap, &VisitJSObject);
-
-    table_.Register(kVisitJSRegExp, &VisitJSObject);
-
-    table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
-    table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
-    table_.Register(kVisitJSFunction,
-                    &JSObjectVisitor::
-                        template VisitSpecialized<JSFunction::kSize>);
-
-    table_.RegisterSpecializations<DataObjectVisitor,
-                                   kVisitDataObject,
-                                   kVisitDataObjectGeneric>();
-    table_.RegisterSpecializations<JSObjectVisitor,
-                                   kVisitJSObject,
-                                   kVisitJSObjectGeneric>();
-    table_.RegisterSpecializations<StructVisitor,
-                                   kVisitStruct,
-                                   kVisitStructGeneric>();
-  }
+  static void Initialize();
 
   static inline int IterateBody(Map* map, HeapObject* obj) {
     return table_.GetVisitor(map)(map, obj);
@@ -379,6 +312,10 @@
         SeqTwoByteStringSize(map->instance_type());
   }
 
+  static inline int VisitFreeSpace(Map* map, HeapObject* object) {
+    return FreeSpace::cast(object)->Size();
+  }
+
   class DataObjectVisitor {
    public:
     template<int object_size>
@@ -410,55 +347,6 @@
   StaticNewSpaceVisitor<StaticVisitor>::table_;
 
 
-void Code::CodeIterateBody(ObjectVisitor* v) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // Use the relocation info pointer before it is visited by
-  // the heap compaction in the next statement.
-  RelocIterator it(this, mode_mask);
-
-  IteratePointer(v, kRelocationInfoOffset);
-  IteratePointer(v, kDeoptimizationDataOffset);
-
-  for (; !it.done(); it.next()) {
-    it.rinfo()->Visit(v);
-  }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // Use the relocation info pointer before it is visited by
-  // the heap compaction in the next statement.
-  RelocIterator it(this, mode_mask);
-
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-
-  for (; !it.done(); it.next()) {
-    it.rinfo()->template Visit<StaticVisitor>(heap);
-  }
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_OBJECTS_VISITING_H_
diff --git a/src/objects.cc b/src/objects.cc
index 88ebbf4..1565504 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -39,7 +39,9 @@
 #include "hydrogen.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "macro-assembler.h"
+#include "mark-compact.h"
 #include "safepoint-table.h"
 #include "string-stream.h"
 #include "utils.h"
@@ -53,10 +55,53 @@
 namespace v8 {
 namespace internal {
 
-// Getters and setters are stored in a fixed array property.  These are
-// constants for their indices.
-const int kGetterIndex = 0;
-const int kSetterIndex = 1;
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+  switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
+      PrintF(out, "FAST_SMI_ONLY_ELEMENTS");
+      break;
+    case FAST_ELEMENTS:
+      PrintF(out, "FAST_ELEMENTS");
+      break;
+    case FAST_DOUBLE_ELEMENTS:
+      PrintF(out, "FAST_DOUBLE_ELEMENTS");
+      break;
+    case DICTIONARY_ELEMENTS:
+      PrintF(out, "DICTIONARY_ELEMENTS");
+      break;
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      PrintF(out, "NON_STRICT_ARGUMENTS_ELEMENTS");
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+      PrintF(out, "EXTERNAL_BYTE_ELEMENTS");
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      PrintF(out, "EXTERNAL_UNSIGNED_BYTE_ELEMENTS");
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+      PrintF(out, "EXTERNAL_SHORT_ELEMENTS");
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      PrintF(out, "EXTERNAL_UNSIGNED_SHORT_ELEMENTS");
+      break;
+    case EXTERNAL_INT_ELEMENTS:
+      PrintF(out, "EXTERNAL_INT_ELEMENTS");
+      break;
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      PrintF(out, "EXTERNAL_UNSIGNED_INT_ELEMENTS");
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      PrintF(out, "EXTERNAL_FLOAT_ELEMENTS");
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+      PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
+      break;
+  }
+}
+
 
 MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
                                                   Object* value) {
@@ -132,34 +177,27 @@
 
 void Object::Lookup(String* name, LookupResult* result) {
   Object* holder = NULL;
-  if (IsSmi()) {
-    Context* global_context = Isolate::Current()->context()->global_context();
-    holder = global_context->number_function()->instance_prototype();
+  if (IsJSReceiver()) {
+    holder = this;
   } else {
-    HeapObject* heap_object = HeapObject::cast(this);
-    if (heap_object->IsJSObject()) {
-      return JSObject::cast(this)->Lookup(name, result);
-    } else if (heap_object->IsJSProxy()) {
-      return result->HandlerResult();
-    }
     Context* global_context = Isolate::Current()->context()->global_context();
-    if (heap_object->IsString()) {
-      holder = global_context->string_function()->instance_prototype();
-    } else if (heap_object->IsHeapNumber()) {
+    if (IsNumber()) {
       holder = global_context->number_function()->instance_prototype();
-    } else if (heap_object->IsBoolean()) {
+    } else if (IsString()) {
+      holder = global_context->string_function()->instance_prototype();
+    } else if (IsBoolean()) {
       holder = global_context->boolean_function()->instance_prototype();
     }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
-  JSObject::cast(holder)->Lookup(name, result);
+  JSReceiver::cast(holder)->Lookup(name, result);
 }
 
 
 MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
                                              String* name,
                                              PropertyAttributes* attributes) {
-  LookupResult result;
+  LookupResult result(name->GetIsolate());
   Lookup(name, &result);
   MaybeObject* value = GetProperty(receiver, &result, name, attributes);
   ASSERT(*attributes <= ABSENT);
@@ -167,10 +205,9 @@
 }
 
 
-MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
-                                             Object* structure,
-                                             String* name,
-                                             Object* holder) {
+MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
+                                               Object* structure,
+                                               String* name) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
   // data structure used to store the callbacks.  Eventually foreign
@@ -178,7 +215,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->address());
+            Foreign::cast(structure)->foreign_address());
     MaybeObject* value = (callback->getter)(receiver, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return value;
@@ -191,10 +228,9 @@
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope(isolate);
     JSObject* self = JSObject::cast(receiver);
-    JSObject* holder_handle = JSObject::cast(holder);
     Handle<String> key(name);
     LOG(isolate, ApiNamedPropertyAccess("load", self, name));
-    CustomArguments args(isolate, data->data(), self, holder_handle);
+    CustomArguments args(isolate, data->data(), self, this);
     v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
@@ -212,9 +248,9 @@
   // __defineGetter__ callback
   if (structure->IsFixedArray()) {
     Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
-    if (getter->IsJSFunction()) {
-      return Object::GetPropertyWithDefinedGetter(receiver,
-                                                  JSFunction::cast(getter));
+    if (getter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -225,47 +261,72 @@
 }
 
 
-MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
-                                            String* name_raw,
-                                            Object* handler_raw) {
-  Isolate* isolate = name_raw->GetIsolate();
+MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
+                                             String* name_raw) {
+  Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
   Handle<Object> receiver(receiver_raw);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(handler_raw);
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { receiver, name };
+  Handle<Object> result = CallTrap(
+    "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    // Get the derived `get' property.
-    trap = isolate->derived_get_trap();
-  }
-
-  // Call trap function.
-  Object** args[] = { receiver.location(), name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   return *result;
 }
 
 
+Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
+  Isolate* isolate = object->IsHeapObject()
+      ? Handle<HeapObject>::cast(object)->GetIsolate()
+      : Isolate::Current();
+  CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
+}
+
+
+MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
+                                            uint32_t index) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return GetPropertyWithHandler(receiver, name);
+}
+
+
+MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
+                                            Object* value,
+                                            StrictModeFlag strict_mode) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return SetPropertyWithHandler(name, value, NONE, strict_mode);
+}
+
+
+bool JSProxy::HasElementWithHandler(uint32_t index) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return HasPropertyWithHandler(name);
+}
+
+
 MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
-                                                  JSFunction* getter) {
+                                                  JSReceiver* getter) {
   HandleScope scope;
-  Handle<JSFunction> fun(JSFunction::cast(getter));
+  Handle<JSReceiver> fun(getter);
   Handle<Object> self(receiver);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = fun->GetHeap()->isolate()->debug();
   // Handle stepping into a getter if step into is active.
-  if (debug->StepInActive()) {
-    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to getters that are function proxies?
+  if (debug->StepInActive() && fun->IsJSFunction()) {
+    debug->HandleStepIn(
+        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
   }
 #endif
+
   bool has_pending_exception;
   Handle<Object> result =
       Execution::Call(fun, self, 0, NULL, &has_pending_exception);
@@ -290,10 +351,8 @@
           AccessorInfo* info = AccessorInfo::cast(obj);
           if (info->all_can_read()) {
             *attributes = result->GetAttributes();
-            return GetPropertyWithCallback(receiver,
-                                           result->GetCallbackObject(),
-                                           name,
-                                           result->holder());
+            return result->holder()->GetPropertyWithCallback(
+                receiver, result->GetCallbackObject(), name);
           }
         }
         break;
@@ -302,7 +361,7 @@
       case FIELD:
       case CONSTANT_FUNCTION: {
         // Search ALL_CAN_READ accessors in prototype chain.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
         if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
@@ -315,7 +374,7 @@
       case INTERCEPTOR: {
         // If the object has an interceptor, try real named properties.
         // No access check in GetPropertyAttributeWithInterceptor.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         result->holder()->LookupRealNamedProperty(name, &r);
         if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
@@ -362,7 +421,7 @@
       case CONSTANT_FUNCTION: {
         if (!continue_search) break;
         // Search ALL_CAN_READ accessors in prototype chain.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
         if (r.IsProperty()) {
           return GetPropertyAttributeWithFailedAccessCheck(receiver,
@@ -376,7 +435,7 @@
       case INTERCEPTOR: {
         // If the object has an interceptor, try real named properties.
         // No access check in GetPropertyAttributeWithInterceptor.
-        LookupResult r;
+        LookupResult r(GetIsolate());
         if (continue_search) {
           result->holder()->LookupRealNamedProperty(name, &r);
         } else {
@@ -396,7 +455,7 @@
     }
   }
 
-  GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
   return ABSENT;
 }
 
@@ -486,7 +545,7 @@
       }
       JSGlobalPropertyCell* cell =
           JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
-      cell->set_value(cell->heap()->the_hole_value());
+      cell->set_value(cell->GetHeap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
       Object* deleted = dictionary->DeleteProperty(entry, mode);
@@ -520,6 +579,21 @@
 }
 
 
+Handle<Object> Object::GetProperty(Handle<Object> object,
+                                   Handle<Object> receiver,
+                                   LookupResult* result,
+                                   Handle<String> key,
+                                   PropertyAttributes* attributes) {
+  Isolate* isolate = object->IsHeapObject()
+      ? Handle<HeapObject>::cast(object)->GetIsolate()
+      : Isolate::Current();
+  CALL_HEAP_FUNCTION(
+      isolate,
+      object->GetProperty(*receiver, result, *key, attributes),
+      Object);
+}
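+// Both handlified wrappers above follow the same pattern: resolve the
+// isolate first (a Smi carries no heap pointer, hence the Isolate::Current()
+// fallback), then let CALL_HEAP_FUNCTION run the raw MaybeObject* version,
+// retrying the call after a GC if the allocation initially fails.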
+
+
 MaybeObject* Object::GetProperty(Object* receiver,
                                  LookupResult* result,
                                  String* name,
@@ -537,7 +611,9 @@
   // holder in the prototype chain.
   // Proxy handlers do not use the proxy's prototype, so we can skip this.
   if (!result->IsHandler()) {
-    Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+    Object* last = result->IsProperty()
+        ? result->holder()
+        : Object::cast(heap->null_value());
     ASSERT(this != this->GetPrototype());
     for (Object* current = this; true; current = current->GetPrototype()) {
       if (current->IsAccessCheckNeeded()) {
@@ -566,30 +642,26 @@
   }
   *attributes = result->GetAttributes();
   Object* value;
-  JSObject* holder = result->holder();
   switch (result->type()) {
     case NORMAL:
-      value = holder->GetNormalizedProperty(result);
+      value = result->holder()->GetNormalizedProperty(result);
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case FIELD:
-      value = holder->FastPropertyAt(result->GetFieldIndex());
+      value = result->holder()->FastPropertyAt(result->GetFieldIndex());
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case CONSTANT_FUNCTION:
       return result->GetConstantFunction();
     case CALLBACKS:
-      return GetPropertyWithCallback(receiver,
-                                     result->GetCallbackObject(),
-                                     name,
-                                     holder);
-    case HANDLER: {
-      JSProxy* proxy = JSProxy::cast(this);
-      return GetPropertyWithHandler(receiver, name, proxy->handler());
-    }
+      return result->holder()->GetPropertyWithCallback(
+          receiver, result->GetCallbackObject(), name);
+    case HANDLER:
+      return result->proxy()->GetPropertyWithHandler(receiver, name);
     case INTERCEPTOR: {
       JSObject* recvr = JSObject::cast(receiver);
-      return holder->GetPropertyWithInterceptor(recvr, name, attributes);
+      return result->holder()->GetPropertyWithInterceptor(
+          recvr, name, attributes);
     }
     case MAP_TRANSITION:
     case ELEMENTS_TRANSITION:
@@ -613,28 +685,21 @@
   for (holder = this;
        holder != heap->null_value();
        holder = holder->GetPrototype()) {
-    if (holder->IsSmi()) {
-      Context* global_context = Isolate::Current()->context()->global_context();
-      holder = global_context->number_function()->instance_prototype();
-    } else {
-      HeapObject* heap_object = HeapObject::cast(holder);
-      if (!heap_object->IsJSObject()) {
-        Isolate* isolate = heap->isolate();
-        Context* global_context = isolate->context()->global_context();
-        if (heap_object->IsString()) {
-          holder = global_context->string_function()->instance_prototype();
-        } else if (heap_object->IsHeapNumber()) {
-          holder = global_context->number_function()->instance_prototype();
-        } else if (heap_object->IsBoolean()) {
-          holder = global_context->boolean_function()->instance_prototype();
-        } else if (heap_object->IsJSProxy()) {
-          // TODO(rossberg): do something
-          return heap->undefined_value();  // For now...
-        } else {
-          // Undefined and null have no indexed properties.
-          ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
-          return heap->undefined_value();
-        }
+    if (!holder->IsJSObject()) {
+      Isolate* isolate = heap->isolate();
+      Context* global_context = isolate->context()->global_context();
+      if (holder->IsNumber()) {
+        holder = global_context->number_function()->instance_prototype();
+      } else if (holder->IsString()) {
+        holder = global_context->string_function()->instance_prototype();
+      } else if (holder->IsBoolean()) {
+        holder = global_context->boolean_function()->instance_prototype();
+      } else if (holder->IsJSProxy()) {
+        return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+      } else {
+        // Undefined and null have no indexed properties.
+        ASSERT(holder->IsUndefined() || holder->IsNull());
+        return heap->undefined_value();
       }
     }
 
@@ -701,6 +766,49 @@
 }
 
 
+MaybeObject* Object::GetHash(CreationFlag flag) {
+  // The object is either a number, a string, an odd-ball,
+  // a real JS object, or a Harmony proxy.
+  if (IsNumber()) {
+    uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
+    return Smi::FromInt(hash & Smi::kMaxValue);
+  }
+  if (IsString()) {
+    uint32_t hash = String::cast(this)->Hash();
+    return Smi::FromInt(hash);
+  }
+  if (IsOddball()) {
+    uint32_t hash = Oddball::cast(this)->to_string()->Hash();
+    return Smi::FromInt(hash);
+  }
+  if (IsJSReceiver()) {
+    return JSReceiver::cast(this)->GetIdentityHash(flag);
+  }
+
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
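An editorial sketch, not part of this patch: the number branch of GetHash
reduces to hashing the double's IEEE-754 bits and masking the result to a Smi
payload. The two helpers below are stand-ins assumed to match the shape of
V8's double_to_uint64 and ComputeLongHash; kMaxSmi stands in for
Smi::kMaxValue on 32-bit targets.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Stand-in: reinterpret the double's IEEE-754 bit pattern.
    static uint64_t double_to_uint64(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return bits;
    }

    // Stand-in with the shape of a 64-to-32 bit mixing hash.
    static uint32_t ComputeLongHash(uint64_t hash) {
      hash = ~hash + (hash << 18);
      hash = hash ^ (hash >> 31);
      hash = hash * 21;
      hash = hash ^ (hash >> 11);
      hash = hash + (hash << 6);
      hash = hash ^ (hash >> 22);
      return static_cast<uint32_t>(hash);
    }

    int main() {
      const uint32_t kMaxSmi = (1u << 30) - 1;  // assumed 31-bit Smi payload
      // Equal doubles hash equally, whether they came from Smis or heap
      // numbers; the mask keeps the result in Smi range.
      std::printf("%u\n", ComputeLongHash(double_to_uint64(1.5)) & kMaxSmi);
      return 0;
    }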
+
+
+bool Object::SameValue(Object* other) {
+  if (other == this) return true;
+  if (!IsHeapObject() || !other->IsHeapObject()) return false;
+
+  // The object is either a number, a string, an odd-ball,
+  // a real JS object, or a Harmony proxy.
+  if (IsNumber() && other->IsNumber()) {
+    double this_value = Number();
+    double other_value = other->Number();
+    return (this_value == other_value) ||
+        (isnan(this_value) && isnan(other_value));
+  }
+  if (IsString() && other->IsString()) {
+    return String::cast(this)->Equals(String::cast(other));
+  }
+  return false;
+}
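The isnan clause is what separates SameValue from the plain double comparison
used above; a tiny standalone check, independent of V8:

    #include <cassert>
    #include <cmath>

    // SameValue's number rule: equal doubles, or both NaN.
    static bool NumberSameValue(double a, double b) {
      return (a == b) || (std::isnan(a) && std::isnan(b));
    }

    int main() {
      double nan = std::nan("");
      assert(!(nan == nan));               // IEEE: NaN is unequal to itself
      assert(NumberSameValue(nan, nan));   // SameValue: both NaN compare equal
      return 0;
    }

Note that 0.0 == -0.0 holds in C++, so signed zeros count as the same value
under this implementation.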
+
+
 void Object::ShortPrint(FILE* out) {
   HeapStringAllocator allocator;
   StringStream accumulator(&allocator);
@@ -818,7 +926,7 @@
                     len - first_length);
       }
       cs->set_first(result);
-      cs->set_second(heap->empty_string());
+      cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
       return result;
     }
     default:
@@ -844,39 +952,39 @@
 #endif  // DEBUG
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kSize) {
-    // The string is too small to fit an external String in its place. This can
-    // only happen for zero length strings.
+  if (size < ExternalString::kShortSize) {
     return false;
   }
-  ASSERT(size >= ExternalString::kSize);
   bool is_ascii = this->IsAsciiRepresentation();
   bool is_symbol = this->IsSymbol();
-  int length = this->length();
-  int hash_field = this->hash_field();
 
   // Morph the object to an external string by adjusting the map and
   // reinitializing the fields.
-  this->set_map(is_ascii ?
-                heap->external_string_with_ascii_data_map() :
-                heap->external_string_map());
-  ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
-  self->set_length(length);
-  self->set_hash_field(hash_field);
-  self->set_resource(resource);
-  // Additionally make the object into an external symbol if the original string
-  // was a symbol to start with.
-  if (is_symbol) {
-    self->Hash();  // Force regeneration of the hash value.
-    // Now morph this external string into a external symbol.
-    this->set_map(is_ascii ?
-                  heap->external_symbol_with_ascii_data_map() :
-                  heap->external_symbol_map());
+  if (size >= ExternalString::kSize) {
+    this->set_map(
+        is_symbol
+            ? (is_ascii ?  heap->external_symbol_with_ascii_data_map()
+                        :  heap->external_symbol_map())
+            : (is_ascii ?  heap->external_string_with_ascii_data_map()
+                        :  heap->external_string_map()));
+  } else {
+    this->set_map(
+        is_symbol
+            ? (is_ascii ?  heap->short_external_symbol_with_ascii_data_map()
+                        :  heap->short_external_symbol_map())
+            : (is_ascii ?  heap->short_external_string_with_ascii_data_map()
+                        :  heap->short_external_string_map()));
   }
+  ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
+  self->set_resource(resource);
+  if (is_symbol) self->Hash();  // Force regeneration of the hash value.
 
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
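+  // The string shrank in place (new_size <= size), so if it was already
+  // marked black we add the negative delta to the page's live-byte count to
+  // keep the incremental marker's accounting consistent.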
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+  }
   return true;
 }
 
@@ -895,34 +1003,30 @@
 #endif  // DEBUG
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kSize) {
-    // The string is too small to fit an external String in its place. This can
-    // only happen for zero length strings.
+  if (size < ExternalString::kShortSize) {
     return false;
   }
-  ASSERT(size >= ExternalString::kSize);
   bool is_symbol = this->IsSymbol();
-  int length = this->length();
-  int hash_field = this->hash_field();
 
   // Morph the object to an external string by adjusting the map and
-  // reinitializing the fields.
-  this->set_map(heap->external_ascii_string_map());
-  ExternalAsciiString* self = ExternalAsciiString::cast(this);
-  self->set_length(length);
-  self->set_hash_field(hash_field);
-  self->set_resource(resource);
-  // Additionally make the object into an external symbol if the original string
-  // was a symbol to start with.
-  if (is_symbol) {
-    self->Hash();  // Force regeneration of the hash value.
-    // Now morph this external string into a external symbol.
-    this->set_map(heap->external_ascii_symbol_map());
+  // reinitializing the fields.  Use short version if space is limited.
+  if (size >= ExternalString::kSize) {
+    this->set_map(is_symbol ? heap->external_ascii_symbol_map()
+                            : heap->external_ascii_string_map());
+  } else {
+    this->set_map(is_symbol ? heap->short_external_ascii_symbol_map()
+                            : heap->short_external_ascii_string_map());
   }
+  ExternalAsciiString* self = ExternalAsciiString::cast(this);
+  self->set_resource(resource);
+  if (is_symbol) self->Hash();  // Force regeneration of the hash value.
 
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+  }
   return true;
 }
 
@@ -998,8 +1102,7 @@
       break;
     }
     case JS_WEAK_MAP_TYPE: {
-      int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
-      accumulator->Add("<JS WeakMap[%d]>", elements);
+      accumulator->Add("<JS WeakMap>");
       break;
     }
     case JS_REGEXP_TYPE: {
@@ -1027,7 +1130,7 @@
     // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
     default: {
       Map* map_of_this = map();
-      Heap* heap = map_of_this->heap();
+      Heap* heap = GetHeap();
       Object* constructor = map_of_this->constructor();
       bool printed = false;
       if (constructor->IsHeapObject() &&
@@ -1049,7 +1152,6 @@
                        global_object ? "Global Object: " : "",
                        vowel ? "n" : "");
                 accumulator->Put(str);
-                accumulator->Put('>');
                 printed = true;
               }
             }
@@ -1070,8 +1172,28 @@
 }
 
 
+void JSObject::PrintElementsTransition(
+    FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
+    ElementsKind to_kind, FixedArrayBase* to_elements) {
+  if (from_kind != to_kind) {
+    PrintF(file, "elements transition [");
+    PrintElementsKind(file, from_kind);
+    PrintF(file, " -> ");
+    PrintElementsKind(file, to_kind);
+    PrintF(file, "] in ");
+    JavaScriptFrame::PrintTop(file, false, true);
+    PrintF(file, " for ");
+    ShortPrint(file);
+    PrintF(file, " from ");
+    from_elements->ShortPrint(file);
+    PrintF(file, " to ");
+    to_elements->ShortPrint(file);
+    PrintF(file, "\n");
+  }
+}
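+// When element-transition tracing is enabled this produces one line per
+// transition, shaped like (illustrative; the frame and object fields vary):
+//   elements transition [FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS] in
+//       <top JS frame> for <object> from <old elements> to <new elements>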
+
+
 void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
-  // if (!HEAP->InNewSpace(this)) PrintF("*", this);
   Heap* heap = GetHeap();
   if (!heap->Contains(this)) {
     accumulator->Add("!!!INVALID POINTER!!!");
@@ -1094,14 +1216,21 @@
   }
   switch (map()->instance_type()) {
     case MAP_TYPE:
-      accumulator->Add("<Map>");
+      accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
       break;
     case FIXED_ARRAY_TYPE:
       accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      accumulator->Add("<FixedDoubleArray[%u]>",
+                       FixedDoubleArray::cast(this)->length());
+      break;
     case BYTE_ARRAY_TYPE:
       accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
       break;
+    case FREE_SPACE_TYPE:
+      accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       accumulator->Add("<ExternalPixelArray[%u]>",
                        ExternalPixelArray::cast(this)->length());
@@ -1241,6 +1370,8 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
     case JS_ARRAY_TYPE:
+    case JS_SET_TYPE:
+    case JS_MAP_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
@@ -1277,6 +1408,7 @@
     case HEAP_NUMBER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
+    case FREE_SPACE_TYPE:
     case EXTERNAL_PIXEL_ARRAY_TYPE:
     case EXTERNAL_BYTE_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -1533,7 +1665,7 @@
 
   // If the old map is the global object map (from new Object()),
   // then transitions are not added to it, so we are done.
-  Heap* heap = old_map->heap();
+  Heap* heap = GetHeap();
   if (old_map == heap->isolate()->context()->global_context()->
       object_function()->map()) {
     return function;
@@ -1609,7 +1741,7 @@
                                    StrictModeFlag strict_mode) {
   ASSERT(!IsJSGlobalProxy());
   Map* map_of_this = map();
-  Heap* heap = map_of_this->heap();
+  Heap* heap = GetHeap();
   if (!map_of_this->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
       return heap->undefined_value();
@@ -1651,13 +1783,21 @@
     PropertyAttributes attributes,
     StrictModeFlag strict_mode) {
   // Check local property, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsFound()) {
     // An existing property, a map transition or a null descriptor was
     // found.  Use set property to handle all these cases.
     return SetProperty(&result, name, value, attributes, strict_mode);
   }
+  bool found = false;
+  MaybeObject* result_object = SetPropertyWithCallbackSetterInPrototypes(
+      name, value, attributes, &found, strict_mode);
+  if (found) return result_object;
   // Add a new real property.
   return AddProperty(name, value, attributes, strict_mode);
 }
@@ -1696,7 +1836,7 @@
     return result;
   }
   // Do not add transitions to the map of "new Object()".
-  if (map() == old_map->heap()->isolate()->context()->global_context()->
+  if (map() == GetIsolate()->context()->global_context()->
       object_function()->map()) {
     return result;
   }
@@ -1825,7 +1965,7 @@
                                      Object* value,
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookup(name, &result);
   return SetProperty(&result, name, value, attributes, strict_mode);
 }
@@ -1850,7 +1990,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->address());
+            Foreign::cast(structure)->foreign_address());
     MaybeObject* obj = (callback->setter)(this,  value, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (obj->IsFailure()) return obj;
@@ -1880,8 +2020,9 @@
 
   if (structure->IsFixedArray()) {
     Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
-    if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    if (setter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+     return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -1900,22 +2041,24 @@
 }
 
 
-MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
-                                                    Object* value) {
+MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
+                                                      Object* value) {
   Isolate* isolate = GetIsolate();
   Handle<Object> value_handle(value, isolate);
-  Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
-  Handle<JSObject> self(this, isolate);
+  Handle<JSReceiver> fun(setter, isolate);
+  Handle<JSReceiver> self(this, isolate);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = isolate->debug();
   // Handle stepping into a setter if step into is active.
-  if (debug->StepInActive()) {
-    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to setters that are function proxies?
+  if (debug->StepInActive() && fun->IsJSFunction()) {
+    debug->HandleStepIn(
+        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
   }
 #endif
   bool has_pending_exception;
-  Object** argv[] = { value_handle.location() };
-  Execution::Call(fun, self, 1, argv, &has_pending_exception);
+  Handle<Object> argv[] = { value_handle };
+  Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *value_handle;
@@ -1928,6 +2071,9 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
+    if (pt->IsJSProxy()) {
+      return result->HandlerResult(JSProxy::cast(pt));
+    }
     JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
     if (result->IsProperty()) {
       if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
@@ -1948,13 +2094,22 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
+    if (pt->IsJSProxy()) {
+      String* name;
+      MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+      if (!maybe->To<String>(&name)) {
+        *found = true;  // Force abort
+        return maybe;
+      }
+      return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
+          name, value, NONE, strict_mode, found);
+    }
     if (!JSObject::cast(pt)->HasDictionaryElements()) {
       continue;
     }
-    SeededNumberDictionary* dictionary =
-        JSObject::cast(pt)->element_dictionary();
+    NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
     int entry = dictionary->FindEntry(index);
-    if (entry != SeededNumberDictionary::kNotFound) {
+    if (entry != NumberDictionary::kNotFound) {
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.type() == CALLBACKS) {
         *found = true;
@@ -1970,6 +2125,48 @@
   return heap->the_hole_value();
 }
 
+MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
+    String* name,
+    Object* value,
+    PropertyAttributes attributes,
+    bool* found,
+    StrictModeFlag strict_mode) {
+  Heap* heap = GetHeap();
+  // We could not find a local property so let's check whether there is an
+  // accessor that wants to handle the property.
+  LookupResult accessor_result(heap->isolate());
+  LookupCallbackSetterInPrototypes(name, &accessor_result);
+  if (accessor_result.IsFound()) {
+    *found = true;
+    if (accessor_result.type() == CALLBACKS) {
+      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+                                     name,
+                                     value,
+                                     accessor_result.holder(),
+                                     strict_mode);
+    } else if (accessor_result.type() == HANDLER) {
+      // There is a proxy in the prototype chain. Invoke its
+      // getPropertyDescriptor trap.
+      bool found = false;
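+      // Note: this local 'found' shadows the bool* out-parameter; only the
+      // trap's verdict below decides whether to return early from here.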
+      // SetPropertyWithHandlerIfDefiningSetter can cause a GC, so make sure
+      // to use the handlified references after calling the function.
+      Handle<JSObject> self(this);
+      Handle<String> hname(name);
+      Handle<Object> hvalue(value);
+      MaybeObject* result =
+          accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
+              name, value, attributes, strict_mode, &found);
+      if (found) return result;
+      // The proxy does not define the property as an accessor.
+      // Consequently, it has no effect on setting the receiver.
+      return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
+    }
+  }
+  *found = false;
+  return heap->the_hole_value();
+}
+
 
 void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
   DescriptorArray* descriptors = map()->instance_descriptors();
@@ -1986,7 +2183,8 @@
                               String* name,
                               LookupResult* result) {
   DescriptorArray* descriptors = instance_descriptors();
-  DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+  DescriptorLookupCache* cache =
+      GetHeap()->isolate()->descriptor_lookup_cache();
   int number = cache->Lookup(descriptors, name);
   if (number == DescriptorLookupCache::kAbsent) {
     number = descriptors->Search(name);
@@ -2000,75 +2198,293 @@
 }
 
 
-MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
-                                           bool safe_to_add_transition) {
-  Heap* current_heap = heap();
-  DescriptorArray* descriptors = instance_descriptors();
-  String* elements_transition_sentinel_name = current_heap->empty_symbol();
+static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
+  ASSERT(!map.is_null());
+  for (int i = 0; i < maps->length(); ++i) {
+    if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
+  }
+  return false;
+}
 
-  if (safe_to_add_transition) {
-    // It's only safe to manipulate the descriptor array if it would be
-    // safe to add a transition.
 
-    ASSERT(!is_shared());  // no transitions can be added to shared maps.
-    // Check if the elements transition already exists.
-    DescriptorLookupCache* cache =
-        current_heap->isolate()->descriptor_lookup_cache();
-    int index = cache->Lookup(descriptors, elements_transition_sentinel_name);
-    if (index == DescriptorLookupCache::kAbsent) {
-      index = descriptors->Search(elements_transition_sentinel_name);
-      cache->Update(descriptors,
-                    elements_transition_sentinel_name,
-                    index);
+template <class T>
+static Handle<T> MaybeNull(T* p) {
+  if (p == NULL) return Handle<T>::null();
+  return Handle<T>(p);
+}
+
+
+Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
+  ElementsKind elms_kind = elements_kind();
+  if (elms_kind == FAST_DOUBLE_ELEMENTS) {
+    bool dummy = true;
+    Handle<Map> fast_map =
+        MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
+    if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+      return fast_map;
     }
+    return Handle<Map>::null();
+  }
+  if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
+    bool dummy = true;
+    Handle<Map> double_map =
+        MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
+    // In the current implementation, if the DOUBLE map doesn't exist, the
+    // FAST map can't exist either.
+    if (double_map.is_null()) return Handle<Map>::null();
+    Handle<Map> fast_map =
+        MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+                                                          &dummy));
+    if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
+      return fast_map;
+    }
+    if (ContainsMap(candidates, double_map)) return double_map;
+  }
+  return Handle<Map>::null();
+}
 
-    // If the transition already exists, check the type. If there is a match,
-    // return it.
-    if (index != DescriptorArray::kNotFound) {
-      PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
-      if (details.type() == ELEMENTS_TRANSITION &&
-          details.elements_kind() == elements_kind) {
-        return descriptors->GetValue(index);
-      } else {
-        safe_to_add_transition = false;
+static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
+                                                   ElementsKind elements_kind) {
+  if (descriptor_contents->IsMap()) {
+    Map* map = Map::cast(descriptor_contents);
+    if (map->elements_kind() == elements_kind) {
+      return map;
+    }
+    return NULL;
+  }
+
+  FixedArray* map_array = FixedArray::cast(descriptor_contents);
+  for (int i = 0; i < map_array->length(); ++i) {
+    Object* current = map_array->get(i);
+    // Skip undefined slots; they are sentinels for reclaimed maps.
+    if (!current->IsUndefined()) {
+      Map* current_map = Map::cast(map_array->get(i));
+      if (current_map->elements_kind() == elements_kind) {
+        return current_map;
       }
     }
   }
 
+  return NULL;
+}
+
+
+static MaybeObject* AddElementsTransitionMapToDescriptor(
+    Object* descriptor_contents,
+    Map* new_map) {
+  // If nothing was in the descriptor for an ELEMENTS_TRANSITION,
+  // simply add the map.
+  if (descriptor_contents == NULL) {
+    return new_map;
+  }
+
+  // There was already a map in the descriptor, create a 2-element FixedArray
+  // to contain the existing map plus the new one.
+  FixedArray* new_array;
+  Heap* heap = new_map->GetHeap();
+  if (descriptor_contents->IsMap()) {
+    // Must tenure, DescriptorArray expects no new-space objects.
+    MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
+    if (!maybe_new_array->To<FixedArray>(&new_array)) {
+      return maybe_new_array;
+    }
+    new_array->set(0, descriptor_contents);
+    new_array->set(1, new_map);
+    return new_array;
+  }
+
+  // The descriptor already contained a list of maps for different ElementKinds
+  // of ELEMENTS_TRANSITION, first check the existing array for an undefined
+  // slot, and if that's not available, create a FixedArray to hold the existing
+  // maps plus the new one and fill it in.
+  FixedArray* array = FixedArray::cast(descriptor_contents);
+  for (int i = 0; i < array->length(); ++i) {
+    if (array->get(i)->IsUndefined()) {
+      array->set(i, new_map);
+      return array;
+    }
+  }
+
+  // Must tenure, DescriptorArray expects no new-space objects.
+  MaybeObject* maybe_new_array =
+      heap->AllocateFixedArray(array->length() + 1, TENURED);
+  if (!maybe_new_array->To<FixedArray>(&new_array)) {
+    return maybe_new_array;
+  }
+  int i = 0;
+  while (i < array->length()) {
+    new_array->set(i, array->get(i));
+    ++i;
+  }
+  new_array->set(i, new_map);
+  return new_array;
+}
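The descriptor slot thus grows through three shapes: empty, a single Map, then
a tenured FixedArray of maps whose undefined holes are reused before the array
is copied and grown. A standalone sketch of the same discipline over plain
integers, with 0 standing in for the undefined sentinel (not V8 code):

    #include <vector>

    // Slot shapes: empty -> single entry -> growable array with holes.
    struct TransitionSlot {
      std::vector<int> maps;  // 0 plays the role of 'undefined'

      void Add(int map_id) {
        for (int& slot : maps) {
          if (slot == 0) {       // reuse a hole left by a reclaimed map
            slot = map_id;
            return;
          }
        }
        maps.push_back(map_id);  // no hole: grow, as the FixedArray copy does
      }
    };

    int main() {
      TransitionSlot slot;
      slot.Add(1);  // the "single map" shape
      slot.Add(2);  // grows into the array shape
      return 0;
    }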
+
+
+String* Map::elements_transition_sentinel_name() {
+  return GetHeap()->empty_symbol();
+}
+
+
+Object* Map::GetDescriptorContents(String* sentinel_name,
+                                   bool* safe_to_add_transition) {
+  // Get the cached index for the descriptors lookup, or find and cache it.
+  DescriptorArray* descriptors = instance_descriptors();
+  DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+  int index = cache->Lookup(descriptors, sentinel_name);
+  if (index == DescriptorLookupCache::kAbsent) {
+    index = descriptors->Search(sentinel_name);
+    cache->Update(descriptors, sentinel_name, index);
+  }
+  // If the transition already exists, return its descriptor.
+  if (index != DescriptorArray::kNotFound) {
+    PropertyDetails details(descriptors->GetDetails(index));
+    if (details.type() == ELEMENTS_TRANSITION) {
+      return descriptors->GetValue(index);
+    } else {
+      *safe_to_add_transition = false;
+    }
+  }
+  return NULL;
+}
+
+
+Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
+                                      bool* safe_to_add_transition) {
+  // Special case: indirect SMI->FAST transition (cf. comment in
+  // AddElementsTransition()).
+  if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      elements_kind == FAST_ELEMENTS) {
+    Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
+                                                        safe_to_add_transition);
+    if (double_map == NULL) return NULL;
+    return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
+                                                   safe_to_add_transition);
+  }
+  Object* descriptor_contents = GetDescriptorContents(
+      elements_transition_sentinel_name(), safe_to_add_transition);
+  if (descriptor_contents != NULL) {
+    Map* maybe_transition_map =
+        GetElementsTransitionMapFromDescriptor(descriptor_contents,
+                                               elements_kind);
+    ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
+    return maybe_transition_map;
+  }
+  return NULL;
+}
+
+
+MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
+                                        Map* transitioned_map) {
+  // The map transition graph should be a tree, therefore the transition
+  // from SMI to FAST elements is not done directly, but by going through
+  // DOUBLE elements first.
+  if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      elements_kind == FAST_ELEMENTS) {
+    bool safe_to_add = true;
+    Map* double_map = this->LookupElementsTransitionMap(
+        FAST_DOUBLE_ELEMENTS, &safe_to_add);
+    // This method is only called when safe_to_add_transition has been found
+    // to be true earlier.
+    ASSERT(safe_to_add);
+
+    if (double_map == NULL) {
+      MaybeObject* maybe_map = this->CopyDropTransitions();
+      if (!maybe_map->To(&double_map)) return maybe_map;
+      double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+      MaybeObject* maybe_double_transition = this->AddElementsTransition(
+          FAST_DOUBLE_ELEMENTS, double_map);
+      if (maybe_double_transition->IsFailure()) return maybe_double_transition;
+    }
+    return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
+  }
+
+  bool safe_to_add_transition = true;
+  Object* descriptor_contents = GetDescriptorContents(
+      elements_transition_sentinel_name(), &safe_to_add_transition);
+  // This method is only called when safe_to_add_transition has been found
+  // to be true earlier.
+  ASSERT(safe_to_add_transition);
+  MaybeObject* maybe_new_contents =
+      AddElementsTransitionMapToDescriptor(descriptor_contents,
+                                           transitioned_map);
+  Object* new_contents;
+  if (!maybe_new_contents->ToObject(&new_contents)) {
+    return maybe_new_contents;
+  }
+
+  ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
+                                    new_contents);
+  Object* new_descriptors;
+  MaybeObject* maybe_new_descriptors =
+      instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
+  if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
+    return maybe_new_descriptors;
+  }
+  set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  return this;
+}
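+// Keeping the transition graph a tree means a SMI_ONLY map reaches FAST in
+// two hops, materializing the intermediate DOUBLE map on demand:
+//
+//   FAST_SMI_ONLY_ELEMENTS --> FAST_DOUBLE_ELEMENTS --> FAST_ELEMENTS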
+
+
+Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
+                                               ElementsKind to_kind) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     object->GetElementsTransitionMap(to_kind),
+                     Map);
+}
+
+
+MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind to_kind) {
+  Map* current_map = map();
+  ElementsKind from_kind = current_map->elements_kind();
+
+  if (from_kind == to_kind) return current_map;
+
+  // Only objects with FastProperties can have DescriptorArrays and can track
+  // element-related maps. Also don't add descriptors to maps that are shared.
+  bool safe_to_add_transition = HasFastProperties() &&
+      !current_map->IsUndefined() &&
+      !current_map->is_shared();
+
+  // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
+  // with elements that switch back and forth between dictionary and fast
+  // element mode.
+  if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    safe_to_add_transition = false;
+  }
+
+  if (safe_to_add_transition) {
+    // It's only safe to manipulate the descriptor array if it would be
+    // safe to add a transition.
+    Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
+        to_kind, &safe_to_add_transition);
+    if (maybe_transition_map != NULL) {
+      return maybe_transition_map;
+    }
+  }
+
+  Map* new_map = NULL;
+
   // No transition to an existing map for the given ElementsKind. Make a new
   // one.
-  Object* obj;
-  { MaybeObject* maybe_map = CopyDropTransitions();
-    if (!maybe_map->ToObject(&obj)) return maybe_map;
+  { MaybeObject* maybe_map = current_map->CopyDropTransitions();
+    if (!maybe_map->To(&new_map)) return maybe_map;
   }
-  Map* new_map = Map::cast(obj);
 
-  new_map->set_elements_kind(elements_kind);
-  GetIsolate()->counters()->map_to_external_array_elements()->Increment();
+  new_map->set_elements_kind(to_kind);
 
   // Only remember the map transition if the object's map is NOT equal to the
   // global object_function's map and there is not an already existing
   // non-matching element transition.
-  bool allow_map_transition =
-      safe_to_add_transition &&
+  bool allow_map_transition = safe_to_add_transition &&
       (GetIsolate()->context()->global_context()->object_function()->map() !=
        map());
   if (allow_map_transition) {
-    // Allocate new instance descriptors for the old map with map transition.
-    ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
-                                      Map::cast(new_map),
-                                      elements_kind);
-    Object* new_descriptors;
-    MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
-        &desc,
-        KEEP_TRANSITIONS);
-    if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
-      return maybe_new_descriptors;
-    }
-    descriptors = DescriptorArray::cast(new_descriptors);
-    set_instance_descriptors(descriptors);
+    MaybeObject* maybe_transition =
+        current_map->AddElementsTransition(to_kind, new_map);
+    if (maybe_transition->IsFailure()) return maybe_transition;
   }
-
   return new_map;
 }
 
@@ -2079,6 +2495,7 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
+    // A GlobalProxy's prototype should always be a proper JSObject.
     return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
   }
 
@@ -2173,7 +2590,7 @@
         case INTERCEPTOR: {
          // Try to lookup real named properties. Note that the only
          // properties that can be set this way are callbacks marked as
          // ALL_CAN_WRITE on the prototype chain.
-          LookupResult r;
+          LookupResult r(GetIsolate());
           LookupRealNamedProperty(name, &r);
           if (r.IsProperty()) {
             return SetPropertyWithFailedAccessCheck(&r,
@@ -2191,10 +2608,10 @@
     }
   }
 
-  Heap* heap = GetHeap();
-  HandleScope scope(heap->isolate());
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
   Handle<Object> value_handle(value);
-  heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+  isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
   return *value_handle;
 }
 
@@ -2205,7 +2622,7 @@
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
   if (result->IsFound() && result->type() == HANDLER) {
-    return JSProxy::cast(this)->SetPropertyWithHandler(
+    return result->proxy()->SetPropertyWithHandler(
         key, value, attributes, strict_mode);
   } else {
     return JSObject::cast(this)->SetPropertyForResult(
@@ -2219,22 +2636,11 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    trap = isolate->derived_has_trap();
-  }
-
-  // Call trap function.
-  Object** args[] = { name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   return result->ToBoolean()->IsTrue();
 }
@@ -2250,24 +2656,85 @@
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
   Handle<Object> value(value_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { receiver, name, value };
+  CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    trap = isolate->derived_set_trap();
+
+  return *value;
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
+    String* name_raw,
+    Object* value_raw,
+    PropertyAttributes attributes,
+    StrictModeFlag strict_mode,
+    bool* found) {
+  *found = true;  // except where defined otherwise...
+  Isolate* isolate = GetHeap()->isolate();
+  Handle<JSProxy> proxy(this);
+  Handle<Object> handler(this->handler());  // Trap might morph proxy.
+  Handle<String> name(name_raw);
+  Handle<Object> value(value_raw);
+  Handle<Object> args[] = { name };
+  Handle<Object> result = proxy->CallTrap(
+      "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+
+  if (!result->IsUndefined()) {
+    // The proxy handler cares about this property.
+    // Check whether it is virtualized as an accessor.
+    // Emulate [[GetProperty]] semantics for proxies.
+    bool has_pending_exception;
+    Handle<Object> argv[] = { result };
+    Handle<Object> desc =
+        Execution::Call(isolate->to_complete_property_descriptor(), result,
+                        ARRAY_SIZE(argv), argv, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+
+    Handle<String> conf_name =
+        isolate->factory()->LookupAsciiSymbol("configurable_");
+    Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
+    ASSERT(!isolate->has_pending_exception());
+    if (configurable->IsFalse()) {
+      Handle<String> trap =
+          isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+      Handle<Object> args[] = { handler, trap, name };
+      Handle<Object> error = isolate->factory()->NewTypeError(
+          "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+      return isolate->Throw(*error);
+    }
+    ASSERT(configurable->IsTrue());
+
+    // Check for AccessorDescriptor.
+    Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+    Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+    ASSERT(!isolate->has_pending_exception());
+    if (!setter->IsUndefined()) {
+      // We have a setter -- invoke it.
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return proxy->SetPropertyWithDefinedSetter(
+          JSReceiver::cast(*setter), *value);
+    } else {
+      Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
+      Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
+      ASSERT(!isolate->has_pending_exception());
+      if (!getter->IsUndefined()) {
+        // We have a getter but no setter -- the property may not be
+        // written. In strict mode, throw an error.
+        if (strict_mode == kNonStrictMode) return *value;
+        Handle<Object> args[] = { name, proxy };
+        Handle<Object> error = isolate->factory()->NewTypeError(
+            "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
+        return isolate->Throw(*error);
+      }
+    }
+    // Fall-through.
   }
 
-  // Call trap function.
-  Object** args[] = {
-      receiver.location(), name.location(), value.location()
-  };
-  bool has_exception;
-  Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
-
+  // The proxy does not define the property as an accessor.
+  *found = false;
   return *value;
 }
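
SetPropertyWithHandlerIfDefiningSetter emulates [[GetProperty]] on the proxy's
handler and only claims the store when the descriptor is virtualized as an
accessor. A simplified standalone model of that decision, with a hypothetical
Descriptor struct in place of the completed property descriptor:

    #include <functional>
    #include <optional>
    #include <stdexcept>
    #include <string>

    // Hypothetical completed descriptor, standing in for the object built by
    // to_complete_property_descriptor ("configurable_", "set_", "get_").
    struct Descriptor {
      bool configurable = true;
      std::function<void(const std::string&)> setter;  // empty == undefined
      bool has_getter = false;
    };

    // Returns true when the proxy claimed the store (*found in the code above);
    // false means the caller should fall through to ordinary assignment.
    bool SetIfDefiningSetter(const std::optional<Descriptor>& desc,
                             const std::string& value, bool strict_mode) {
      if (!desc) return false;  // trap returned undefined: property unclaimed
      if (!desc->configurable)
        throw std::runtime_error("proxy_prop_not_configurable");
      if (desc->setter) {  // virtualized as an accessor with a setter
        desc->setter(value);
        return true;
      }
      if (desc->has_getter) {  // getter but no setter: not writable
        if (strict_mode) throw std::runtime_error("no_setter_in_callback");
        return true;  // sloppy mode: the write is silently dropped
      }
      return false;  // data descriptor: fall through, as in the code above
    }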
 
@@ -2278,31 +2745,16 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "delete", Handle<Object>(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    Handle<Object> args[] = { handler, trap_name };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
-    isolate->Throw(*error);
-    return Failure::Exception();
-  }
-
-  // Call trap function.
-  Object** args[] = { name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   Object* bool_result = result->ToBoolean();
-  if (mode == STRICT_DELETION &&
-      bool_result == isolate->heap()->false_value()) {
-    Handle<Object> args[] = { handler, trap_name };
+  if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+    Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+    Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
@@ -2312,39 +2764,76 @@
 }
 
 
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
-    JSReceiver* receiver_raw,
-    String* name_raw,
-    bool* has_exception) {
+MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
+    uint32_t index,
+    DeleteMode mode) {
   Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
+  Handle<String> name = isolate->factory()->Uint32ToString(index);
+  return JSProxy::DeletePropertyWithHandler(*name, mode);
+}
+
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+    JSReceiver* receiver_raw,
+    String* name_raw) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<JSProxy> proxy(this);
+  Handle<Object> handler(this->handler());  // Trap might morph proxy.
   Handle<JSReceiver> receiver(receiver_raw);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name =
-      isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return NONE;
-  if (trap->IsUndefined()) {
-    Handle<Object> args[] = { handler, trap_name };
+
+  if (result->IsUndefined()) return ABSENT;
+
+  bool has_pending_exception;
+  Handle<Object> argv[] = { result };
+  Handle<Object> desc =
+      Execution::Call(isolate->to_complete_property_descriptor(), result,
+                      ARRAY_SIZE(argv), argv, &has_pending_exception);
+  if (has_pending_exception) return NONE;
+
+  // Convert result to PropertyAttributes.
+  Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
+  Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
+  if (isolate->has_pending_exception()) return NONE;
+  Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
+  Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
+  if (isolate->has_pending_exception()) return NONE;
+  Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
+  Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+  if (isolate->has_pending_exception()) return NONE;
+
+  if (configurable->IsFalse()) {
+    Handle<String> trap =
+        isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+    Handle<Object> args[] = { handler, trap, name };
     Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+        "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
-    *has_exception = true;
     return NONE;
   }
 
-  // Call trap function.
-  Object** args[] = { name.location() };
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
-  if (has_exception) return NONE;
+  int attributes = NONE;
+  if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
+  if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
+  if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
+  return static_cast<PropertyAttributes>(attributes);
+}
 
-  // TODO(rossberg): convert result to PropertyAttributes
-  USE(result);
-  return NONE;
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+    JSReceiver* receiver,
+    uint32_t index) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<String> name = isolate->factory()->Uint32ToString(index);
+  return GetPropertyAttributeWithHandler(receiver, *name);
 }
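
The attribute conversion above replaces the old TODO: each descriptor flag
that is false contributes one restrictive attribute bit. A standalone sketch,
with bit values assumed to match V8's PropertyAttributes:

    #include <cstdio>

    enum PropertyAttributes {
      NONE = 0,
      READ_ONLY = 1 << 0,
      DONT_ENUM = 1 << 1,
      DONT_DELETE = 1 << 2,
    };

    // Each descriptor flag that is false contributes one restrictive bit.
    PropertyAttributes ToAttributes(bool enumerable, bool configurable,
                                    bool writable) {
      int attributes = NONE;
      if (!enumerable) attributes |= DONT_ENUM;
      if (!configurable) attributes |= DONT_DELETE;
      if (!writable) attributes |= READ_ONLY;
      return static_cast<PropertyAttributes>(attributes);
    }

    int main() {
      // A frozen property: enumerable, but neither writable nor configurable.
      std::printf("%d\n", ToAttributes(true, false, false));  // prints 5
    }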
 
 
@@ -2353,6 +2842,9 @@
   HandleScope scope(isolate);
   Handle<JSProxy> self(this);
 
+  // Save identity hash.
+  MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+
   if (IsJSFunctionProxy()) {
     isolate->factory()->BecomeJSFunction(self);
     // Code will be set on the JavaScript side.
@@ -2360,9 +2852,42 @@
     isolate->factory()->BecomeJSObject(self);
   }
   ASSERT(self->IsJSObject());
+
+  // Inherit identity, if it was present.
+  // Inherit the identity hash, if one was present.
+  Object* hash;
+  if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
+    Handle<JSObject> new_self(JSObject::cast(*self));
+    isolate->factory()->SetIdentityHash(new_self, hash);
+  }
 }
 
 
+MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
+                                                 Handle<Object> derived,
+                                                 int argc,
+                                                 Handle<Object> argv[]) {
+  Isolate* isolate = GetIsolate();
+  Handle<Object> handler(this->handler());
+
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return trap;
+
+  if (trap->IsUndefined()) {
+    if (derived.is_null()) {
+      Handle<Object> args[] = { handler, trap_name };
+      Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+      isolate->Throw(*error);
+      return Handle<Object>();
+    }
+    trap = Handle<Object>(derived);
+  }
+
+  bool threw;
+  return Execution::Call(trap, handler, argc, argv, &threw);
+}
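
The new CallTrap helper centralizes the trap dispatch that the call sites
above previously duplicated. A minimal standalone sketch of that dispatch
order, using hypothetical stand-in types (Value, Trap, Handler) rather than
V8's Handle machinery:

    #include <functional>
    #include <map>
    #include <optional>
    #include <stdexcept>
    #include <string>
    #include <vector>

    using Value = std::string;
    using Trap = std::function<Value(const std::vector<Value>&)>;

    struct Handler {
      std::map<std::string, Trap> traps;  // traps installed by user code
    };

    // Models JSProxy::CallTrap: prefer the trap found on the handler, fall
    // back to a derived (default) trap when the caller supplies one, and
    // otherwise signal the missing trap (V8 throws "handler_trap_missing").
    Value CallTrap(const Handler& handler, const std::string& name,
                   const std::optional<Trap>& derived,
                   const std::vector<Value>& args) {
      auto it = handler.traps.find(name);
      if (it != handler.traps.end()) return it->second(args);
      if (derived) return (*derived)(args);
      throw std::runtime_error("handler_trap_missing: " + name);
    }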
+
 
 MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
                                             String* name,
@@ -2387,48 +2912,46 @@
   }
 
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()
-      && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(result,
-                                            name,
-                                            value,
-                                            true,
-                                            strict_mode);
+  if (IsAccessCheckNeeded()) {
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+      return SetPropertyWithFailedAccessCheck(
+          result, name, value, true, strict_mode);
+    }
   }
 
   if (IsJSGlobalProxy()) {
     Object* proto = GetPrototype();
     if (proto->IsNull()) return value;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->SetProperty(
+    return JSObject::cast(proto)->SetPropertyForResult(
         result, name, value, attributes, strict_mode);
   }
 
   if (!result->IsProperty() && !IsJSContextExtensionObject()) {
-    // We could not find a local property so let's check whether there is an
-    // accessor that wants to handle the property.
-    LookupResult accessor_result;
-    LookupCallbackSetterInPrototypes(name, &accessor_result);
-    if (accessor_result.IsProperty()) {
-      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
-                                     name,
-                                     value,
-                                     accessor_result.holder(),
-                                     strict_mode);
-    }
+    bool found = false;
+    MaybeObject* result_object;
+    result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+                                                              value,
+                                                              attributes,
+                                                              &found,
+                                                              strict_mode);
+    if (found) return result_object;
   }
+
+  // At this point, no GC should have happened, as this would invalidate
+  // 'result', which we cannot handlify!
+
   if (!result->IsFound()) {
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes, strict_mode);
   }
   if (result->IsReadOnly() && result->IsProperty()) {
     if (strict_mode == kStrictMode) {
-      HandleScope scope(heap->isolate());
-      Handle<String> key(name);
-      Handle<Object> holder(this);
-      Handle<Object> args[2] = { key, holder };
+      Handle<JSObject> self(this);
+      Handle<String> hname(name);
+      Handle<Object> args[] = { hname, self };
       return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
-          "strict_read_only_property", HandleVector(args, 2)));
+          "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
     } else {
       return value;
     }
@@ -2484,10 +3007,11 @@
     case NULL_DESCRIPTOR:
     case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
-    default:
+    case HANDLER:
       UNREACHABLE();
+      return value;
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return value;
 }
 
@@ -2509,12 +3033,12 @@
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
   AssertNoContextChange ncc;
-  LookupResult result;
+  Isolate* isolate = GetIsolate();
+  LookupResult result(isolate);
   LocalLookup(name, &result);
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
       return SetPropertyWithFailedAccessCheck(&result,
                                               name,
                                               value,
@@ -2572,10 +3096,11 @@
     case NULL_DESCRIPTOR:
     case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
-    default:
+    case HANDLER:
       UNREACHABLE();
+      return value;
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return value;
 }
 
@@ -2585,7 +3110,7 @@
       String* name,
       bool continue_search) {
   // Check local property, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) return result.GetAttributes();
 
@@ -2657,12 +3182,11 @@
       String* key) {
   uint32_t index = 0;
   if (IsJSObject() && key->AsArrayIndex(&index)) {
-    if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
-      return NONE;
-    return ABSENT;
+    return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
+        ? NONE : ABSENT;
   }
   // Named property.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   Lookup(key, &result);
   return GetPropertyAttribute(receiver, &result, key, true);
 }
@@ -2689,10 +3213,8 @@
       case CALLBACKS:
         return result->GetAttributes();
       case HANDLER: {
-        // TODO(rossberg): propagate exceptions properly.
-        bool has_exception = false;
-        return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
-            receiver, name, &has_exception);
+        return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
+            receiver, name);
       }
       case INTERCEPTOR:
         return result->holder()->GetPropertyAttributeWithInterceptor(
@@ -2713,7 +3235,7 @@
     return ABSENT;
   }
   // Named property.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookup(name, &result);
   return GetPropertyAttribute(this, &result, name, false);
 }
@@ -2728,7 +3250,9 @@
   if (result->IsMap() &&
       Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
 #ifdef DEBUG
-    Map::cast(result)->SharedMapVerify();
+    if (FLAG_verify_heap) {
+      Map::cast(result)->SharedMapVerify();
+    }
     if (FLAG_enable_slow_asserts) {
       // The cached map should match newly created normalized map bit-by-bit.
       Object* fresh;
@@ -2764,6 +3288,15 @@
 }
 
 
+void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
+                                  Handle<String> name,
+                                  Handle<Code> code) {
+  Isolate* isolate = object->GetIsolate();
+  CALL_HEAP_FUNCTION_VOID(isolate,
+                          object->UpdateMapCodeCache(*name, *code));
+}
+
+
 MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
   if (map()->is_shared()) {
     // Fast case maps are never marked as shared.
@@ -2853,12 +3386,14 @@
       case INTERCEPTOR:
       case ELEMENTS_TRANSITION:
         break;
-      default:
+      case HANDLER:
+      case NORMAL:
         UNREACHABLE();
+        break;
     }
   }
 
-  Heap* current_heap = map_of_this->heap();
+  Heap* current_heap = GetHeap();
 
   // Copy the next enumeration index from instance descriptor.
   int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
@@ -2880,6 +3415,10 @@
   ASSERT(instance_size_delta >= 0);
   current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
                                      instance_size_delta);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta);
+  }
+
   set_map(new_map);
   new_map->clear_instance_descriptors();
@@ -2913,13 +3452,14 @@
   FixedArrayBase* array = FixedArrayBase::cast(elements());
   Map* old_map = array->map();
   bool is_arguments =
-      (old_map == old_map->heap()->non_strict_arguments_elements_map());
+      (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
   if (is_arguments) {
     array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
   }
   if (array->IsDictionary()) return array;
 
   ASSERT(HasFastElements() ||
+         HasFastSmiOnlyElements() ||
          HasFastDoubleElements() ||
          HasFastArgumentsElements());
   // Compute the effective length and allocate a new backing store.
@@ -2929,11 +3469,11 @@
   int old_capacity = 0;
   int used_elements = 0;
   GetElementsCapacityAndUsage(&old_capacity, &used_elements);
-  SeededNumberDictionary* dictionary = NULL;
+  NumberDictionary* dictionary = NULL;
   { Object* object;
-    MaybeObject* maybe = SeededNumberDictionary::Allocate(used_elements);
+    MaybeObject* maybe = NumberDictionary::Allocate(used_elements);
     if (!maybe->ToObject(&object)) return maybe;
-    dictionary = SeededNumberDictionary::cast(object);
+    dictionary = NumberDictionary::cast(object);
   }
 
   // Copy the elements to the new backing store.
@@ -2954,7 +3494,8 @@
         if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
       }
     } else {
-      ASSERT(old_map->has_fast_elements());
+      ASSERT(old_map->has_fast_elements() ||
+             old_map->has_fast_smi_only_elements());
       value = FixedArray::cast(array)->get(i);
     }
     PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -2963,7 +3504,7 @@
       MaybeObject* maybe_result =
           dictionary->AddNumberEntry(i, value, details);
       if (!maybe_result->ToObject(&result)) return maybe_result;
-      dictionary = SeededNumberDictionary::cast(result);
+      dictionary = NumberDictionary::cast(result);
     }
   }
 
@@ -2974,13 +3515,14 @@
     // Set the new map first to satisfy the elements type assert in
     // set_elements().
     Object* new_map;
-    MaybeObject* maybe = map()->GetSlowElementsMap();
+    MaybeObject* maybe = GetElementsTransitionMap(DICTIONARY_ELEMENTS);
     if (!maybe->ToObject(&new_map)) return maybe;
     set_map(Map::cast(new_map));
     set_elements(dictionary);
   }
 
-  old_map->isolate()->counters()->elements_to_dictionary()->Increment();
+  old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+      Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -2994,102 +3536,229 @@
 }
 
 
-MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
+Smi* JSReceiver::GenerateIdentityHash() {
   Isolate* isolate = GetIsolate();
-  Heap* heap = isolate->heap();
-  Object* holder = BypassGlobalProxy();
-  if (holder->IsUndefined()) return heap->undefined_value();
-  JSObject* obj = JSObject::cast(holder);
-  if (obj->HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden symbol. Since the
-    // hidden symbols hash code is zero (and no other string has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = obj->map()->instance_descriptors();
-    if ((descriptors->number_of_descriptors() > 0) &&
-        (descriptors->GetKey(0) == heap->hidden_symbol()) &&
-        descriptors->IsProperty(0)) {
-      ASSERT(descriptors->GetType(0) == FIELD);
-      return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
-    }
-  }
-
-  // Only attempt to find the hidden properties in the local object and not
-  // in the prototype chain.
-  if (!obj->HasHiddenPropertiesObject()) {
-    // Hidden properties object not found. Allocate a new hidden properties
-    // object if requested. Otherwise return the undefined value.
-    if (flag == ALLOW_CREATION) {
-      Object* hidden_obj;
-      { MaybeObject* maybe_obj = heap->AllocateJSObject(
-            isolate->context()->global_context()->object_function());
-        if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
-      }
-      // Don't allow leakage of the hidden object through accessors
-      // on Object.prototype.
-      {
-        MaybeObject* maybe_obj =
-            JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
-        if (maybe_obj->IsFailure()) return maybe_obj;
-      }
-      return obj->SetHiddenPropertiesObject(hidden_obj);
-    } else {
-      return heap->undefined_value();
-    }
-  }
-  return obj->GetHiddenPropertiesObject();
-}
-
-
-MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
-  Isolate* isolate = GetIsolate();
-  Object* hidden_props_obj;
-  { MaybeObject* maybe_obj = GetHiddenProperties(flag);
-    if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
-  }
-  if (!hidden_props_obj->IsJSObject()) {
-    // We failed to create hidden properties.  That's a detached
-    // global proxy.
-    ASSERT(hidden_props_obj->IsUndefined());
-    return Smi::FromInt(0);
-  }
-  JSObject* hidden_props = JSObject::cast(hidden_props_obj);
-  String* hash_symbol = isolate->heap()->identity_hash_symbol();
-  {
-    // Note that HasLocalProperty() can cause a GC in the general case in the
-    // presence of interceptors.
-    AssertNoAllocation no_alloc;
-    if (hidden_props->HasLocalProperty(hash_symbol)) {
-      MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
-      return Smi::cast(hash->ToObjectChecked());
-    }
-  }
 
   int hash_value;
   int attempts = 0;
   do {
     // Generate a random 32-bit hash value but limit range to fit
     // within a smi.
-    hash_value = V8::Random(isolate) & Smi::kMaxValue;
+    hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
     attempts++;
   } while (hash_value == 0 && attempts < 30);
   hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
 
-  Smi* hash = Smi::FromInt(hash_value);
-  { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
-        hash_symbol,
-        hash,
-        static_cast<PropertyAttributes>(None));
-    if (result->IsFailure()) return result;
+  return Smi::FromInt(hash_value);
+}
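
The generation loop bounds its retries because the hash 0 is reserved to mean
"no identity hash assigned yet". A standalone sketch of the same scheme,
assuming a 31-bit Smi payload and std::mt19937 in place of V8::RandomPrivate:

    #include <cstdint>
    #include <random>

    constexpr int32_t kSmiMaxValue = (1 << 30) - 1;  // 31-bit payload assumed

    // Models JSReceiver::GenerateIdentityHash: retry a bounded number of
    // times, then force a nonzero result.
    int32_t GenerateIdentityHash(std::mt19937& rng) {
      int32_t hash_value;
      int attempts = 0;
      do {
        hash_value = static_cast<int32_t>(rng()) & kSmiMaxValue;
        attempts++;
      } while (hash_value == 0 && attempts < 30);
      return hash_value != 0 ? hash_value : 1;  // never return 0
    }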
+
+
+MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
+  MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+                                         hash);
+  if (maybe->IsFailure()) return maybe;
+  return this;
+}
+
+
+MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
+  Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
+  if (stored_value->IsSmi()) return stored_value;
+
+  // Do not generate permanent identity hash code if not requested.
+  if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
+
+  Smi* hash = GenerateIdentityHash();
+  MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+                                          hash);
+  if (result->IsFailure()) return result;
+  if (result->ToObjectUnchecked()->IsUndefined()) {
+    // Trying to get hash of detached proxy.
+    return Smi::FromInt(0);
   }
   return hash;
 }
 
 
+MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
+  Object* hash = this->hash();
+  if (!hash->IsSmi() && flag == ALLOW_CREATION) {
+    hash = GenerateIdentityHash();
+    set_hash(hash);
+  }
+  return hash;
+}
+
+
+Object* JSObject::GetHiddenProperty(String* key) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return undefined.
+    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
+  }
+  ASSERT(!IsJSGlobalProxy());
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
+  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
+    return GetHeap()->undefined_value();
+  }
+  StringDictionary* dictionary =
+      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+  int entry = dictionary->FindEntry(key);
+  if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
+  return dictionary->ValueAt(entry);
+}
+
+
+MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return undefined.
+    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
+  }
+  ASSERT(!IsJSGlobalProxy());
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
+  StringDictionary* dictionary;
+  if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
+
+  // If it was found, check if the key is already in the dictionary.
+  int entry = dictionary->FindEntry(key);
+  if (entry != StringDictionary::kNotFound) {
+    // If key was found, just update the value.
+    dictionary->ValueAtPut(entry, value);
+    return this;
+  }
+  // Key was not already in the dictionary, so add the entry.
+  MaybeObject* insert_result = dictionary->Add(key,
+                                               value,
+                                               PropertyDetails(NONE, NORMAL));
+  StringDictionary* new_dict;
+  if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
+  if (new_dict != dictionary) {
+    // If adding the key expanded the dictionary (i.e., Add returned a new
+    // dictionary), store it back to the object.
+    MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
+    if (store_result->IsFailure()) return store_result;
+  }
+  // Return this to mark success.
+  return this;
+}
+
+
+void JSObject::DeleteHiddenProperty(String* key) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return immediately.
+    if (proxy_parent->IsNull()) return;
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
+    return;
+  }
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
+  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
+  StringDictionary* dictionary =
+      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+  int entry = dictionary->FindEntry(key);
+  if (entry == StringDictionary::kNotFound) {
+    // Key wasn't in dictionary. Deletion is a success.
+    return;
+  }
+  // Key was in the dictionary. Remove it.
+  dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
+}
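
The three hidden-property operations above share one pattern: a side
dictionary reached through the reserved hidden symbol, invisible to ordinary
property lookups. A toy model using std::map in place of StringDictionary:

    #include <map>
    #include <string>

    // Toy object with a separate hidden-property table, standing in for the
    // StringDictionary stored under the hidden symbol.
    struct ToyObject {
      std::map<std::string, std::string> hidden;

      // SetHiddenProperty: create the table on demand, insert or update.
      void SetHidden(const std::string& key, const std::string& value) {
        hidden[key] = value;
      }

      // GetHiddenProperty: a missing key reads as "undefined".
      std::string GetHidden(const std::string& key) const {
        auto it = hidden.find(key);
        return it == hidden.end() ? "undefined" : it->second;
      }

      // DeleteHiddenProperty: deleting an absent key is still a success.
      void DeleteHidden(const std::string& key) { hidden.erase(key); }
    };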
+
+
+bool JSObject::HasHiddenProperties() {
+  return GetPropertyAttributePostInterceptor(this,
+                                             GetHeap()->hidden_symbol(),
+                                             false) != ABSENT;
+}
+
+
+MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
+  ASSERT(!IsJSGlobalProxy());
+  if (HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbols hash code is zero (and no other string has hash
+    // code zero) it will always occupy the first entry if present.
+    DescriptorArray* descriptors = this->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      Object* hidden_store =
+          this->FastPropertyAt(descriptors->GetFieldIndex(0));
+      return StringDictionary::cast(hidden_store);
+    }
+  } else {
+    PropertyAttributes attributes;
+    // You can't install a getter on a property indexed by the hidden symbol,
+    // so we can be sure that GetLocalPropertyPostInterceptor returns a real
+    // object.
+    Object* lookup =
+        GetLocalPropertyPostInterceptor(this,
+                                        GetHeap()->hidden_symbol(),
+                                        &attributes)->ToObjectUnchecked();
+    if (!lookup->IsUndefined()) {
+      return StringDictionary::cast(lookup);
+    }
+  }
+  if (!create_if_absent) return GetHeap()->undefined_value();
+  const int kInitialSize = 5;
+  MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
+  StringDictionary* dictionary;
+  if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
+  MaybeObject* store_result =
+      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                 dictionary,
+                                 DONT_ENUM,
+                                 kNonStrictMode);
+  if (store_result->IsFailure()) return store_result;
+  return dictionary;
+}
+
+
+MaybeObject* JSObject::SetHiddenPropertiesDictionary(
+    StringDictionary* dictionary) {
+  ASSERT(!IsJSGlobalProxy());
+  ASSERT(HasHiddenProperties());
+  if (HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero) it will always occupy the first entry if present.
+    DescriptorArray* descriptors = this->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
+      return this;
+    }
+  }
+  MaybeObject* store_result =
+      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                 dictionary,
+                                 DONT_ENUM,
+                                 kNonStrictMode);
+  if (store_result->IsFailure()) return store_result;
+  return this;
+}
+
+
 MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
                                                      DeleteMode mode) {
   // Check local property, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (!result.IsProperty()) return GetHeap()->true_value();
 
@@ -3202,9 +3871,16 @@
 MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
   if (IsJSProxy()) {
     return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
-  } else {
-    return JSObject::cast(this)->DeleteProperty(name, mode);
   }
+  return JSObject::cast(this)->DeleteProperty(name, mode);
+}
+
+
+MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+  }
+  return JSObject::cast(this)->DeleteElement(index, mode);
 }
 
 
@@ -3231,7 +3907,7 @@
   if (name->AsArrayIndex(&index)) {
     return DeleteElement(index, mode);
   } else {
-    LookupResult result;
+    LookupResult result(isolate);
     LocalLookup(name, &result);
     if (!result.IsProperty()) return isolate->heap()->true_value();
     // Ignore attributes if forcing a deletion.
@@ -3268,7 +3944,8 @@
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
-  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+  ASSERT(kind == FAST_ELEMENTS ||
+         kind == DICTIONARY_ELEMENTS);
   if (kind == FAST_ELEMENTS) {
     int length = IsJSArray()
         ? Smi::cast(JSArray::cast(this)->length())->value()
@@ -3278,8 +3955,7 @@
       if (!element->IsTheHole() && element == object) return true;
     }
   } else {
-    Object* key =
-        SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
+    Object* key = NumberDictionary::cast(elements)->SlowReverseLookup(object);
     if (!key->IsUndefined()) return true;
   }
   return false;
@@ -3289,7 +3965,7 @@
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
   Map* map_of_this = map();
-  Heap* heap = map_of_this->heap();
+  Heap* heap = GetHeap();
   AssertNoAllocation no_alloc;
 
   // Is the object the constructor for this object?
@@ -3324,6 +4000,8 @@
       // Raw pixels and external arrays do not reference other
       // objects.
       break;
+    case FAST_SMI_ONLY_ELEMENTS:
+      break;
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(this->elements());
@@ -3418,9 +4096,9 @@
   }
 
   // If there are fast elements we normalize.
-  SeededNumberDictionary* dictionary = NULL;
+  NumberDictionary* dictionary = NULL;
   { MaybeObject* maybe = NormalizeElements();
-    if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
+    if (!maybe->To<NumberDictionary>(&dictionary)) return maybe;
   }
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
   // Make sure that we never go back to fast case.
@@ -3440,15 +4118,16 @@
 
 
 // Tests for the fast common case for property enumeration:
-// - This object and all prototypes has an enum cache (which means that it has
-//   no interceptors and needs no access checks).
+// - This object and all prototypes have an enum cache (which means that
+//   it is not a proxy, has no interceptors, and needs no access checks).
 // - This object has no elements.
 // - No prototype has enumerable properties/elements.
-bool JSObject::IsSimpleEnum() {
+bool JSReceiver::IsSimpleEnum() {
   Heap* heap = GetHeap();
   for (Object* o = this;
        o != heap->null_value();
        o = JSObject::cast(o)->GetPrototype()) {
+    if (!o->IsJSObject()) return false;
     JSObject* curr = JSObject::cast(o);
     if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
     ASSERT(!curr->HasNamedInterceptor());
@@ -3511,15 +4190,6 @@
 
 
 void JSReceiver::LocalLookup(String* name, LookupResult* result) {
-  if (IsJSProxy()) {
-    result->HandlerResult();
-  } else {
-    JSObject::cast(this)->LocalLookup(name, result);
-  }
-}
-
-
-void JSObject::LocalLookup(String* name, LookupResult* result) {
   ASSERT(name->IsString());
 
   Heap* heap = GetHeap();
@@ -3528,28 +4198,36 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->LocalLookup(name, result);
+    return JSReceiver::cast(proto)->LocalLookup(name, result);
+  }
+
+  if (IsJSProxy()) {
+    result->HandlerResult(JSProxy::cast(this));
+    return;
   }
 
   // Do not use inline caching if the object is a non-global object
   // that requires access checks.
-  if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
+  if (IsAccessCheckNeeded()) {
     result->DisallowCaching();
   }
 
+  JSObject* js_object = JSObject::cast(this);
+
   // Check __proto__ before interceptor.
   if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
-    result->ConstantResult(this);
+    result->ConstantResult(js_object);
     return;
   }
 
   // Check for lookup interceptor except when bootstrapping.
-  if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
-    result->InterceptorResult(this);
+  if (js_object->HasNamedInterceptor() &&
+      !heap->isolate()->bootstrapper()->IsActive()) {
+    result->InterceptorResult(js_object);
     return;
   }
 
-  LocalLookupRealNamedProperty(name, result);
+  js_object->LocalLookupRealNamedProperty(name, result);
 }
 
 
@@ -3559,7 +4237,7 @@
   for (Object* current = this;
        current != heap->null_value();
        current = JSObject::cast(current)->GetPrototype()) {
-    JSObject::cast(current)->LocalLookup(name, result);
+    JSReceiver::cast(current)->LocalLookup(name, result);
     if (result->IsProperty()) return;
   }
   result->NotFound();
@@ -3570,7 +4248,7 @@
 void JSObject::LookupCallback(String* name, LookupResult* result) {
   Heap* heap = GetHeap();
   for (Object* current = this;
-       current != heap->null_value();
+       current != heap->null_value() && current->IsJSObject();
        current = JSObject::cast(current)->GetPrototype()) {
     JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
     if (result->IsProperty() && result->type() == CALLBACKS) return;
@@ -3579,19 +4257,27 @@
 }
 
 
-// Search for a getter or setter in an elements dictionary.  Returns either
-// undefined if the element is read-only, or the getter/setter pair (fixed
-// array) if there is an existing one, or the hole value if the element does
-// not exist or is a normal non-getter/setter data element.
-static Object* FindGetterSetterInDictionary(SeededNumberDictionary* dictionary,
-                                            uint32_t index,
-                                            Heap* heap) {
+// Search for a getter or setter in an elements dictionary and update its
+// attributes.  Returns either undefined if the element is read-only, or the
+// getter/setter pair (fixed array) if there is an existing one, or the hole
+// value if the element does not exist or is a normal non-getter/setter data
+// element.
+static Object* UpdateGetterSetterInDictionary(NumberDictionary* dictionary,
+                                              uint32_t index,
+                                              PropertyAttributes attributes,
+                                              Heap* heap) {
   int entry = dictionary->FindEntry(index);
-  if (entry != SeededNumberDictionary::kNotFound) {
+  if (entry != NumberDictionary::kNotFound) {
     Object* result = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
     if (details.IsReadOnly()) return heap->undefined_value();
-    if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
+    if (details.type() == CALLBACKS && result->IsFixedArray()) {
+      if (details.attributes() != attributes) {
+        dictionary->DetailsAtPut(entry,
+                                 PropertyDetails(attributes, CALLBACKS, index));
+      }
+      return result;
+    }
   }
   return heap->the_hole_value();
 }
@@ -3616,6 +4302,7 @@
 
   if (is_element) {
     switch (GetElementsKind()) {
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
         break;
@@ -3632,8 +4319,10 @@
         // elements.
         return heap->undefined_value();
       case DICTIONARY_ELEMENTS: {
-        Object* probe =
-            FindGetterSetterInDictionary(element_dictionary(), index, heap);
+        Object* probe = UpdateGetterSetterInDictionary(element_dictionary(),
+                                                       index,
+                                                       attributes,
+                                                       heap);
         if (!probe->IsTheHole()) return probe;
         // Otherwise allow to override it.
         break;
@@ -3649,9 +4338,11 @@
         if (probe == NULL || probe->IsTheHole()) {
           FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
           if (arguments->IsDictionary()) {
-            SeededNumberDictionary* dictionary =
-                SeededNumberDictionary::cast(arguments);
-            probe = FindGetterSetterInDictionary(dictionary, index, heap);
+            NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+            probe = UpdateGetterSetterInDictionary(dictionary,
+                                                   index,
+                                                   attributes,
+                                                   heap);
             if (!probe->IsTheHole()) return probe;
           }
         }
@@ -3660,7 +4351,7 @@
     }
   } else {
     // Lookup the name.
-    LookupResult result;
+    LookupResult result(heap->isolate());
     LocalLookup(name, &result);
     if (result.IsProperty()) {
       if (result.IsReadOnly()) return heap->undefined_value();
@@ -3690,8 +4381,8 @@
 
 
 bool JSObject::CanSetCallback(String* name) {
-  ASSERT(!IsAccessCheckNeeded()
-         || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
+  ASSERT(!IsAccessCheckNeeded() ||
+         GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
 
   // Check if there is an API defined callback object which prohibits
   // callback overwriting in this object or it's prototype chain.
@@ -3699,7 +4390,7 @@
   // certain accessors such as window.location should not be allowed
   // to be overwritten because allowing overwriting could potentially
   // cause security problems.
-  LookupResult callback_result;
+  LookupResult callback_result(GetIsolate());
   LookupCallback(name, &callback_result);
   if (callback_result.IsProperty()) {
     Object* obj = callback_result.GetCallbackObject();
@@ -3719,11 +4410,11 @@
   PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
 
   // Normalize elements to make this operation simple.
-  SeededNumberDictionary* dictionary = NULL;
+  NumberDictionary* dictionary = NULL;
   { Object* result;
     MaybeObject* maybe = NormalizeElements();
     if (!maybe->ToObject(&result)) return maybe;
-    dictionary = SeededNumberDictionary::cast(result);
+    dictionary = NumberDictionary::cast(result);
   }
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
 
@@ -3731,7 +4422,7 @@
   { Object* result;
     MaybeObject* maybe = dictionary->Set(index, structure, details);
     if (!maybe->ToObject(&result)) return maybe;
-    dictionary = SeededNumberDictionary::cast(result);
+    dictionary = NumberDictionary::cast(result);
   }
 
   dictionary->set_requires_slow_elements();
@@ -3803,7 +4494,7 @@
                                       bool is_getter,
                                       Object* fun,
                                       PropertyAttributes attributes) {
-  ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+  ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
   Isolate* isolate = GetIsolate();
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
@@ -3866,6 +4557,7 @@
 
     // Accessors overwrite previous callbacks (cf. with getters/setters).
     switch (GetElementsKind()) {
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
         break;
@@ -3895,7 +4587,7 @@
     }
   } else {
     // Lookup the name.
-    LookupResult result;
+    LookupResult result(isolate);
     LocalLookup(name, &result);
     // ES5 forbids turning a property into an accessor if it's not
     // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
@@ -3928,7 +4620,11 @@
   }
 
   // Make the lookup and include prototypes.
-  int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
+  // Use local copies of the static constants below so they are only used
+  // as compile-time values; gcc debug builds otherwise fail at link time.
+  const int getter_index = kGetterIndex;
+  const int setter_index = kSetterIndex;
+  int accessor_index = is_getter ? getter_index : setter_index;
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
     for (Object* obj = this;
@@ -3936,9 +4632,9 @@
          obj = JSObject::cast(obj)->GetPrototype()) {
       JSObject* js_object = JSObject::cast(obj);
       if (js_object->HasDictionaryElements()) {
-        SeededNumberDictionary* dictionary = js_object->element_dictionary();
+        NumberDictionary* dictionary = js_object->element_dictionary();
         int entry = dictionary->FindEntry(index);
-        if (entry != SeededNumberDictionary::kNotFound) {
+        if (entry != NumberDictionary::kNotFound) {
           Object* element = dictionary->ValueAt(entry);
           PropertyDetails details = dictionary->DetailsAt(entry);
           if (details.type() == CALLBACKS) {
@@ -3953,7 +4649,7 @@
     for (Object* obj = this;
          obj != heap->null_value();
          obj = JSObject::cast(obj)->GetPrototype()) {
-      LookupResult result;
+      LookupResult result(heap->isolate());
       JSObject::cast(obj)->LocalLookup(name, &result);
       if (result.IsProperty()) {
         if (result.IsReadOnly()) return heap->undefined_value();
@@ -4061,7 +4757,7 @@
   Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
 
 #ifdef DEBUG
-  if (Map::cast(result)->is_shared()) {
+  if (FLAG_verify_heap && Map::cast(result)->is_shared()) {
     Map::cast(result)->SharedMapVerify();
   }
 #endif
@@ -4084,12 +4780,19 @@
   return new_map;
 }
 
+void Map::UpdateCodeCache(Handle<Map> map,
+                          Handle<String> name,
+                          Handle<Code> code) {
+  Isolate* isolate = map->GetIsolate();
+  CALL_HEAP_FUNCTION_VOID(isolate,
+                          map->UpdateCodeCache(*name, *code));
+}
 
 MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
   // Allocate the code cache if not present.
   if (code_cache()->IsFixedArray()) {
     Object* result;
-    { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
+    { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     set_code_cache(result);
@@ -4131,7 +4834,7 @@
   // Traverse the transition tree without using a stack.  We do this by
   // reversing the pointers in the maps and descriptor arrays.
   Map* current = this;
-  Map* meta_map = heap()->meta_map();
+  Map* meta_map = GetHeap()->meta_map();
   Object** map_or_index_field = NULL;
   while (current != meta_map) {
     DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
@@ -4152,7 +4855,7 @@
           // of the next map and recording the index in the transition array in
           // the map field of the array.
           Map* next = Map::cast(contents->get(i));
-          next->set_map(current);
+          next->set_map_unsafe(current);
           *map_or_index_field = Smi::FromInt(i + 2);
           current = next;
           map_done = false;
@@ -4177,23 +4880,23 @@
       Object* perhaps_map = prototype_transitions->get(i);
       if (perhaps_map->IsMap()) {
         Map* next = Map::cast(perhaps_map);
-        next->set_map(current);
+        next->set_map_unsafe(current);
         *proto_map_or_index_field =
             Smi::FromInt(i + kProtoTransitionElementsPerEntry);
         current = next;
         continue;
       }
     }
-    *proto_map_or_index_field = heap()->fixed_array_map();
+    *proto_map_or_index_field = GetHeap()->fixed_array_map();
     if (map_or_index_field != NULL) {
-      *map_or_index_field = heap()->fixed_array_map();
+      *map_or_index_field = GetHeap()->fixed_array_map();
     }
 
     // The callback expects a map to have a real map as its map, so we save
     // the map field, which is being used to track the traversal and put the
     // correct map (the meta_map) in place while we do the callback.
     Map* prev = current->map();
-    current->set_map(meta_map);
+    current->set_map_unsafe(meta_map);
     callback(current, data);
     current = prev;
   }
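
TraverseTransitionTree walks the transition tree without a stack by reversing
the pointers it follows and restoring them on the way back up (the
set_map_unsafe calls skip the write barrier because the map field temporarily
holds bookkeeping values). A minimal sketch of the same pointer-reversal idea
on a plain binary tree, with a state tag standing in for the Smi index V8
stores in the clobbered fields:

    // Toy node; 'state' records how far this node's children have been walked.
    struct Node {
      Node* left = nullptr;
      Node* right = nullptr;
      int state = 0;  // 0: not started, 1: walking left, 2: walking right
    };

    // Stackless traversal by pointer reversal: descending overwrites the
    // child pointer just followed with the parent pointer; climbing restores
    // it. The callback runs once both subtrees are done, as in the code above.
    void TraverseNoStack(Node* root, void (*callback)(Node*)) {
      Node* parent = nullptr;
      Node* cur = root;
      while (cur != nullptr) {
        if (cur->state == 0) {           // first arrival: descend left
          cur->state = 1;
          if (cur->left != nullptr) {
            Node* child = cur->left;
            cur->left = parent;          // reverse the link
            parent = cur;
            cur = child;
          }
        } else if (cur->state == 1) {    // left done: descend right
          cur->state = 2;
          if (cur->right != nullptr) {
            Node* child = cur->right;
            cur->right = parent;         // reverse the link
            parent = cur;
            cur = child;
          }
        } else {                         // both done: visit, then climb back
          callback(cur);
          if (parent == nullptr) return;
          Node* up = parent;
          if (up->state == 1) {          // we came down through up->left
            parent = up->left;
            up->left = cur;              // restore
          } else {                       // we came down through up->right
            parent = up->right;
            up->right = cur;             // restore
          }
          cur = up;
        }
      }
    }

A second walk over the same tree would first need the state tags reset, just
as the V8 code puts the meta map back before invoking the callback.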
@@ -4409,7 +5112,7 @@
   MUST_USE_RESULT MaybeObject* AsObject() {
     ASSERT(code_ != NULL);
     Object* obj;
-    { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
@@ -4467,13 +5170,22 @@
 void CodeCacheHashTable::RemoveByIndex(int index) {
   ASSERT(index >= 0);
   Heap* heap = GetHeap();
-  set(EntryToIndex(index), heap->null_value());
-  set(EntryToIndex(index) + 1, heap->null_value());
+  set(EntryToIndex(index), heap->the_hole_value());
+  set(EntryToIndex(index) + 1, heap->the_hole_value());
   ElementRemoved();
 }
 
 
-MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
+void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache,
+                                  MapHandleList* maps,
+                                  Code::Flags flags,
+                                  Handle<Code> code) {
+  Isolate* isolate = cache->GetIsolate();
+  CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code));
+}
+
+
+MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
                                           Code::Flags flags,
                                           Code* code) {
   // Initialize cache if necessary.
@@ -4501,13 +5213,14 @@
 }
 
 
-Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
+Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
+                                            Code::Flags flags) {
   if (!cache()->IsUndefined()) {
     PolymorphicCodeCacheHashTable* hash_table =
         PolymorphicCodeCacheHashTable::cast(cache());
-    return hash_table->Lookup(maps, flags);
+    return Handle<Object>(hash_table->Lookup(maps, flags));
   } else {
-    return GetHeap()->undefined_value();
+    return GetIsolate()->factory()->undefined_value();
   }
 }
 
@@ -4518,12 +5231,12 @@
 class PolymorphicCodeCacheHashTableKey : public HashTableKey {
  public:
   // Callers must ensure that |maps| outlives the newly constructed object.
-  PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
+  PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
       : maps_(maps),
         code_flags_(code_flags) {}
 
   bool IsMatch(Object* other) {
-    MapList other_maps(kDefaultListAllocationSize);
+    MapHandleList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(other, &other_flags, &other_maps);
     if (code_flags_ != other_flags) return false;
@@ -4539,7 +5252,7 @@
     for (int i = 0; i < maps_->length(); ++i) {
       bool match_found = false;
       for (int j = 0; j < other_maps.length(); ++j) {
-        if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
+        if (maps_->at(i)->EquivalentTo(*other_maps.at(j))) {
           match_found = true;
           break;
         }
@@ -4549,7 +5262,7 @@
     return true;
   }
 
-  static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
+  static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
     uint32_t hash = code_flags;
     for (int i = 0; i < maps->length(); ++i) {
       hash ^= maps->at(i)->Hash();
@@ -4562,7 +5275,7 @@
   }
 
   uint32_t HashForObject(Object* obj) {
-    MapList other_maps(kDefaultListAllocationSize);
+    MapHandleList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(obj, &other_flags, &other_maps);
     return MapsHashHelper(&other_maps, other_flags);
@@ -4580,29 +5293,32 @@
     FixedArray* list = FixedArray::cast(obj);
     list->set(0, Smi::FromInt(code_flags_));
     for (int i = 0; i < maps_->length(); ++i) {
-      list->set(i + 1, maps_->at(i));
+      list->set(i + 1, *maps_->at(i));
     }
     return list;
   }
 
  private:
-  static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
+  static MapHandleList* FromObject(Object* obj,
+                                   int* code_flags,
+                                   MapHandleList* maps) {
     FixedArray* list = FixedArray::cast(obj);
     maps->Rewind(0);
     *code_flags = Smi::cast(list->get(0))->value();
     for (int i = 1; i < list->length(); ++i) {
-      maps->Add(Map::cast(list->get(i)));
+      maps->Add(Handle<Map>(Map::cast(list->get(i))));
     }
     return maps;
   }
 
-  MapList* maps_;  // weak.
+  MapHandleList* maps_;  // weak.
   int code_flags_;
   static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
 };
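
MapsHashHelper XORs the per-map hashes into the code flags, so the key hash is
independent of the order in which the receiver maps were collected, matching
the order-insensitive IsMatch above. The same computation in isolation:

    #include <cstdint>
    #include <vector>

    // Order-insensitive combination: XOR is commutative and associative, so
    // any permutation of the same map set yields the same hash.
    uint32_t MapsHashHelper(const std::vector<uint32_t>& map_hashes,
                            uint32_t code_flags) {
      uint32_t hash = code_flags;
      for (uint32_t h : map_hashes) hash ^= h;
      return hash;
    }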
 
 
-Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
+Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
+                                              int code_flags) {
   PolymorphicCodeCacheHashTableKey key(maps, code_flags);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -4610,7 +5326,7 @@
 }
 
 
-MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
                                                 int code_flags,
                                                 Code* code) {
   PolymorphicCodeCacheHashTableKey key(maps, code_flags);
@@ -4745,9 +5461,9 @@
     if (IsEmpty()) return;  // Do nothing for empty descriptor array.
     FixedArray::cast(bridge_storage)->
       set(kEnumCacheBridgeCacheIndex, new_cache);
-    fast_set(FixedArray::cast(bridge_storage),
-             kEnumCacheBridgeEnumIndex,
-             get(kEnumerationIndexIndex));
+    NoWriteBarrierSet(FixedArray::cast(bridge_storage),
+                      kEnumCacheBridgeEnumIndex,
+                      get(kEnumerationIndexIndex));
     set(kEnumerationIndexIndex, bridge_storage);
   }
 }
@@ -4808,10 +5524,16 @@
       ++new_size;
     }
   }
+
+  DescriptorArray* new_descriptors;
   { MaybeObject* maybe_result = Allocate(new_size);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+    if (!maybe_result->To<DescriptorArray>(&new_descriptors)) {
+      return maybe_result;
+    }
   }
-  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
+  DescriptorArray::WhitenessWitness witness(new_descriptors);
+
   // Set the enumeration index in the descriptors and set the enumeration index
   // in the result.
   int enumeration_index = NextEnumerationIndex();
@@ -4839,16 +5561,16 @@
     }
     if (IsNullDescriptor(from_index)) continue;
     if (remove_transitions && IsTransition(from_index)) continue;
-    new_descriptors->CopyFrom(to_index++, this, from_index);
+    new_descriptors->CopyFrom(to_index++, this, from_index, witness);
   }
 
-  new_descriptors->Set(to_index++, descriptor);
+  new_descriptors->Set(to_index++, descriptor, witness);
   if (replacing) from_index++;
 
   for (; from_index < number_of_descriptors(); from_index++) {
     if (IsNullDescriptor(from_index)) continue;
     if (remove_transitions && IsTransition(from_index)) continue;
-    new_descriptors->CopyFrom(to_index++, this, from_index);
+    new_descriptors->CopyFrom(to_index++, this, from_index, witness);
   }
 
   ASSERT(to_index == new_descriptors->number_of_descriptors());
@@ -4870,16 +5592,21 @@
   }
 
   // Allocate the new descriptor array.
-  Object* result;
+  DescriptorArray* new_descriptors;
   { MaybeObject* maybe_result = Allocate(number_of_descriptors() - num_removed);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
+    if (!maybe_result->To<DescriptorArray>(&new_descriptors)) {
+      return maybe_result;
+    }
   }
-  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
+  DescriptorArray::WhitenessWitness witness(new_descriptors);
 
   // Copy the content.
   int next_descriptor = 0;
   for (int i = 0; i < number_of_descriptors(); i++) {
-    if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
+    if (IsProperty(i)) {
+      new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+    }
   }
   ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
 
@@ -4887,7 +5614,7 @@
 }
 
 
-void DescriptorArray::SortUnchecked() {
+void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
   // In-place heap sort.
   int len = number_of_descriptors();
 
@@ -4908,7 +5635,7 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      Swap(parent_index, child_index);
+      NoWriteBarrierSwapDescriptors(parent_index, child_index);
       // Now element at child_index could be < its children.
       parent_index = child_index;  // parent_hash remains correct.
     }
@@ -4917,8 +5644,8 @@
   // Extract elements and create sorted array.
   for (int i = len - 1; i > 0; --i) {
     // Put max element at the back of the array.
-    Swap(0, i);
-    // Sift down the new top element.
+    NoWriteBarrierSwapDescriptors(0, i);
+      // Sift down the new top element.
     int parent_index = 0;
     const uint32_t parent_hash = GetKey(parent_index)->Hash();
     const int max_parent_index = (i / 2) - 1;
@@ -4933,15 +5660,15 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      Swap(parent_index, child_index);
+      NoWriteBarrierSwapDescriptors(parent_index, child_index);
       parent_index = child_index;
     }
   }
 }
 
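// A minimal, self-contained sketch of the sift-down step driving the
// in-place heap sort above (illustrative only; the real loop keys on
// descriptor hashes and swaps slots without write barriers):
static void SiftDown(int* heap, int parent, int len) {
  while (true) {
    int child = 2 * parent + 1;
    if (child >= len) break;
    // Pick the larger child so the max-heap property can be restored.
    if (child + 1 < len && heap[child + 1] > heap[child]) child += 1;
    if (heap[child] <= heap[parent]) break;
    int tmp = heap[parent];
    heap[parent] = heap[child];
    heap[child] = tmp;
    parent = child;  // Continue sifting from the swapped position.
  }
}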
 
-void DescriptorArray::Sort() {
-  SortUnchecked();
+void DescriptorArray::Sort(const WhitenessWitness& witness) {
+  SortUnchecked(witness);
   SLOW_ASSERT(IsSortedNoDuplicates());
 }
 
@@ -5026,24 +5753,6 @@
 }
 
 
-int String::Utf8Length() {
-  if (IsAsciiRepresentation()) return length();
-  // Attempt to flatten before accessing the string.  It probably
-  // doesn't make Utf8Length faster, but it is very likely that
-  // the string will be accessed later (for example by WriteUtf8)
-  // so it's still a good idea.
-  Heap* heap = GetHeap();
-  TryFlatten();
-  Access<StringInputBuffer> buffer(
-      heap->isolate()->objects_string_input_buffer());
-  buffer->Reset(0, this);
-  int result = 0;
-  while (buffer->has_more())
-    result += unibrow::Utf8::Length(buffer->GetNext());
-  return result;
-}
-
-
 String::FlatContent String::GetFlatContent() {
   int length = this->length();
   StringShape shape(this);
@@ -5070,7 +5779,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqAsciiString::cast(string)->GetChars();
     } else {
-      start = ExternalAsciiString::cast(string)->resource()->data();
+      start = ExternalAsciiString::cast(string)->GetChars();
     }
     return FlatContent(Vector<const char>(start + offset, length));
   } else {
@@ -5079,7 +5788,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqTwoByteString::cast(string)->GetChars();
     } else {
-      start = ExternalTwoByteString::cast(string)->resource()->data();
+      start = ExternalTwoByteString::cast(string)->GetChars();
     }
     return FlatContent(Vector<const uc16>(start + offset, length));
   }
@@ -5105,12 +5814,9 @@
   buffer->Reset(offset, this);
   int character_position = offset;
   int utf8_bytes = 0;
-  while (buffer->has_more()) {
+  while (buffer->has_more() && character_position++ < offset + length) {
     uint16_t character = buffer->GetNext();
-    if (character_position < offset + length) {
-      utf8_bytes += unibrow::Utf8::Length(character);
-    }
-    character_position++;
+    utf8_bytes += unibrow::Utf8::Length(character);
   }
 
   if (length_return) {
@@ -5124,16 +5830,13 @@
   buffer->Seek(offset);
   character_position = offset;
   int utf8_byte_position = 0;
-  while (buffer->has_more()) {
+  while (buffer->has_more() && character_position++ < offset + length) {
     uint16_t character = buffer->GetNext();
-    if (character_position < offset + length) {
-      if (allow_nulls == DISALLOW_NULLS && character == 0) {
-        character = ' ';
-      }
-      utf8_byte_position +=
-          unibrow::Utf8::Encode(result + utf8_byte_position, character);
+    if (allow_nulls == DISALLOW_NULLS && character == 0) {
+      character = ' ';
     }
-    character_position++;
+    utf8_byte_position +=
+        unibrow::Utf8::Encode(result + utf8_byte_position, character);
   }
   result[utf8_byte_position] = 0;
   return SmartArrayPointer<char>(result);
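// Note on the merged loop conditions above: `character_position++ < limit`
// compares the value *before* the increment, so the rewritten loops visit
// exactly the same `length` code units as the old explicit range check did.
// Tiny illustration:
//
//   int i = 0, visits = 0;
//   while (i++ < 3) visits++;  // visits == 3 (i is tested at 0, 1 and 2)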
@@ -5318,44 +6021,26 @@
 }
 
 
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return resource()->data()[index];
-}
-
-
 const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
       unsigned* remaining,
       unsigned* offset_ptr,
       unsigned max_chars) {
   // Cast const char* to unibrow::byte* (signedness difference).
   const unibrow::byte* b =
-      reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
+      reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
   *remaining = max_chars;
   *offset_ptr += max_chars;
   return b;
 }
 
 
-const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
-      unsigned start) {
-  return resource()->data() + start;
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return resource()->data()[index];
-}
-
-
 void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
       ReadBlockBuffer* rbb,
       unsigned* offset_ptr,
       unsigned max_chars) {
   unsigned chars_read = 0;
   unsigned offset = *offset_ptr;
-  const uint16_t* data = resource()->data();
+  const uint16_t* data = GetChars();
   while (chars_read < max_chars) {
     uint16_t c = data[offset];
     if (c <= kMaxAsciiCharCode) {
@@ -5401,9 +6086,7 @@
       unsigned max_chars) {
   unsigned capacity = rbb->capacity - rbb->cursor;
   if (max_chars > capacity) max_chars = capacity;
-  memcpy(rbb->util_buffer + rbb->cursor,
-         resource()->data() + *offset_ptr,
-         max_chars);
+  memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
   rbb->remaining += max_chars;
   *offset_ptr += max_chars;
   rbb->cursor += max_chars;
@@ -5467,6 +6150,73 @@
 }
 
 
+// This method determines the type of string involved and then computes the
+// UTF8 length of the string.  It doesn't flatten the string and, because it
+// always recurses into the shorter side of a cons string, needs at most
+// log(n) levels of recursion for a string of length n.
+int String::Utf8Length(String* input, int from, int to) {
+  if (from == to) return 0;
+  int total = 0;
+  while (true) {
+    if (input->IsAsciiRepresentation()) return total + to - from;
+    switch (StringShape(input).representation_tag()) {
+      case kConsStringTag: {
+        ConsString* str = ConsString::cast(input);
+        String* first = str->first();
+        String* second = str->second();
+        int first_length = first->length();
+        if (first_length - from < to - first_length) {
+          if (first_length > from) {
+            // Left hand side is shorter.
+            total += Utf8Length(first, from, first_length);
+            input = second;
+            from = 0;
+            to -= first_length;
+          } else {
+            // We only need the right hand side.
+            input = second;
+            from -= first_length;
+            to -= first_length;
+          }
+        } else {
+          if (first_length <= to) {
+            // Right hand side is shorter.
+            total += Utf8Length(second, 0, to - first_length);
+            input = first;
+            to = first_length;
+          } else {
+            // We only need the left hand side.
+            input = first;
+          }
+        }
+        continue;
+      }
+      case kExternalStringTag:
+      case kSeqStringTag: {
+        Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
+        const uc16* p = vector.start();
+        for (int i = from; i < to; i++) {
+          total += unibrow::Utf8::Length(p[i]);
+        }
+        return total;
+      }
+      case kSlicedStringTag: {
+        SlicedString* str = SlicedString::cast(input);
+        int offset = str->offset();
+        input = str->parent();
+        from += offset;
+        to += offset;
+        continue;
+      }
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return 0;
+  }
+  return 0;
+}
+
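// A standalone sketch of the per-code-unit byte count that
// unibrow::Utf8::Length is assumed to compute for BMP code units
// (surrogate-pair handling omitted for brevity):
static int Utf8LengthOfCodeUnit(uint16_t c) {
  if (c <= 0x007F) return 1;  // Plain ASCII encodes as one byte.
  if (c <= 0x07FF) return 2;  // Two-byte sequence.
  return 3;                   // Three bytes cover the rest of the BMP.
}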
+
 void Relocatable::PostGarbageCollectionProcessing() {
   Isolate* isolate = Isolate::Current();
   Relocatable* current = isolate->relocatable_top();
@@ -5778,13 +6528,13 @@
     switch (StringShape(source).full_representation_tag()) {
       case kAsciiStringTag | kExternalStringTag: {
         CopyChars(sink,
-                  ExternalAsciiString::cast(source)->resource()->data() + from,
+                  ExternalAsciiString::cast(source)->GetChars() + from,
                   to - from);
         return;
       }
       case kTwoByteStringTag | kExternalStringTag: {
         const uc16* data =
-            ExternalTwoByteString::cast(source)->resource()->data();
+            ExternalTwoByteString::cast(source)->GetChars();
         CopyChars(sink,
                   data + from,
                   to - from);
@@ -5929,20 +6679,6 @@
   // Fast check: if hash code is computed for both strings
   // a fast negative check can be performed.
   if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
-    if (FLAG_enable_slow_asserts) {
-      if (Hash() != other->Hash()) {
-        bool found_difference = false;
-        for (int i = 0; i < len; i++) {
-          if (Get(i) != other->Get(i)) {
-            found_difference = true;
-            break;
-          }
-        }
-        ASSERT(found_difference);
-      }
-    }
-#endif
     if (Hash() != other->Hash()) return false;
   }
 
@@ -6012,7 +6748,7 @@
   if (StringShape(this).IsSymbol()) return false;
 
   Map* map = this->map();
-  Heap* heap = map->heap();
+  Heap* heap = GetHeap();
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
@@ -6078,16 +6814,12 @@
   // Compute the hash code.
   uint32_t field = 0;
   if (StringShape(this).IsSequentialAscii()) {
-    field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
-                                 len,
-                                 GetHeap()->HashSeed());
+    field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(), len);
   } else if (StringShape(this).IsSequentialTwoByte()) {
-    field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(),
-                                 len,
-                                 GetHeap()->HashSeed());
+    field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(), len);
   } else {
     StringInputBuffer buffer(this);
-    field = ComputeHashField(&buffer, len, GetHeap()->HashSeed());
+    field = ComputeHashField(&buffer, len);
   }
 
   // Store the hash code in the object.
@@ -6178,9 +6910,8 @@
 
 
 uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
-                                  int length,
-                                  uint32_t seed) {
-  StringHasher hasher(length, seed);
+                                  int length) {
+  StringHasher hasher(length);
 
   // Very long strings have a trivial hash that doesn't inspect the
   // string contents.
@@ -6220,29 +6951,43 @@
 }
 
 
+void Map::CreateOneBackPointer(Map* target) {
+#ifdef DEBUG
+  // Verify target.
+  Object* source_prototype = prototype();
+  Object* target_prototype = target->prototype();
+  ASSERT(source_prototype->IsJSReceiver() ||
+         source_prototype->IsMap() ||
+         source_prototype->IsNull());
+  ASSERT(target_prototype->IsJSReceiver() ||
+         target_prototype->IsNull());
+  ASSERT(source_prototype->IsMap() ||
+         source_prototype == target_prototype);
+#endif
+  // Point target back to source.  set_prototype() will not let us set
+  // the prototype to a map, as we do here.
+  *RawField(target, kPrototypeOffset) = this;
+}
+
+
 void Map::CreateBackPointers() {
   DescriptorArray* descriptors = instance_descriptors();
   for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
-    if (descriptors->GetType(i) == MAP_TRANSITION ||
-        descriptors->GetType(i) == ELEMENTS_TRANSITION ||
-        descriptors->GetType(i) == CONSTANT_TRANSITION) {
-      // Get target.
-      Map* target = Map::cast(descriptors->GetValue(i));
-#ifdef DEBUG
-      // Verify target.
-      Object* source_prototype = prototype();
-      Object* target_prototype = target->prototype();
-      ASSERT(source_prototype->IsJSObject() ||
-             source_prototype->IsMap() ||
-             source_prototype->IsNull());
-      ASSERT(target_prototype->IsJSObject() ||
-             target_prototype->IsNull());
-      ASSERT(source_prototype->IsMap() ||
-             source_prototype == target_prototype);
-#endif
-      // Point target back to source.  set_prototype() will not let us set
-      // the prototype to a map, as we do here.
-      *RawField(target, kPrototypeOffset) = this;
+    if (descriptors->IsTransition(i)) {
+      Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i));
+      if (object->IsMap()) {
+        CreateOneBackPointer(reinterpret_cast<Map*>(object));
+      } else {
+        ASSERT(object->IsFixedArray());
+        ASSERT(descriptors->GetType(i) == ELEMENTS_TRANSITION);
+        FixedArray* array = reinterpret_cast<FixedArray*>(object);
+        for (int j = 0; j < array->length(); ++j) {
+          Map* target = reinterpret_cast<Map*>(array->get(j));
+          if (!target->IsUndefined()) {
+            CreateOneBackPointer(target);
+          }
+        }
+      }
     }
   }
 }
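// Shape of a transition descriptor's value after this change (sketch): a
// MAP_TRANSITION or CONSTANT_TRANSITION still holds a single Map, while an
// ELEMENTS_TRANSITION may hold either a single Map or a FixedArray of Map
// slots with undefined marking an empty slot; that is why both this walker
// and the pruning pass below dispatch on object->IsMap().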
@@ -6266,19 +7011,47 @@
     // map is not reached again by following a back pointer from a
     // non-live object.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-    if (details.type() == MAP_TRANSITION ||
-        details.type() == ELEMENTS_TRANSITION ||
-        details.type() == CONSTANT_TRANSITION) {
-      Map* target = reinterpret_cast<Map*>(contents->get(i));
-      ASSERT(target->IsHeapObject());
-      if (!target->IsMarked()) {
-        ASSERT(target->IsMap());
-        contents->set_unchecked(i + 1, NullDescriptorDetails);
-        contents->set_null_unchecked(heap, i);
-        ASSERT(target->prototype() == this ||
-               target->prototype() == real_prototype);
-        // Getter prototype() is read-only, set_prototype() has side effects.
-        *RawField(target, Map::kPrototypeOffset) = real_prototype;
+    if (IsTransitionType(details.type())) {
+      Object* object = reinterpret_cast<Object*>(contents->get(i));
+      if (object->IsMap()) {
+        Map* target = reinterpret_cast<Map*>(object);
+        ASSERT(target->IsHeapObject());
+        MarkBit map_mark = Marking::MarkBitFrom(target);
+        if (!map_mark.Get()) {
+          ASSERT(target->IsMap());
+          contents->set_unchecked(i + 1, NullDescriptorDetails);
+          contents->set_null_unchecked(heap, i);
+          ASSERT(target->prototype() == this ||
+                 target->prototype() == real_prototype);
+          // Getter prototype() is read-only, set_prototype() has side effects.
+          *RawField(target, Map::kPrototypeOffset) = real_prototype;
+        }
+      } else {
+        ASSERT(object->IsFixedArray());
+        ASSERT(details.type() == ELEMENTS_TRANSITION);
+        FixedArray* array = reinterpret_cast<FixedArray*>(object);
+        bool reachable_map_found = false;
+        for (int j = 0; j < array->length(); ++j) {
+          Map* target = reinterpret_cast<Map*>(array->get(j));
+          ASSERT(target->IsHeapObject());
+          MarkBit map_mark = Marking::MarkBitFrom(target);
+          if (!map_mark.Get()) {
+            ASSERT(target->IsMap());
+            array->set_undefined(j);
+            ASSERT(target->prototype() == this ||
+                   target->prototype() == real_prototype);
+            // Getter prototype() is read-only, set_prototype() has side
+            // effects.
+            *RawField(target, Map::kPrototypeOffset) = real_prototype;
+          } else if (target->IsMap()) {
+            reachable_map_found = true;
+          }
+        }
+        // If no live map was found, make sure the FixedArray also gets
+        // collected.
+        if (!reachable_map_found) {
+          contents->set_unchecked(i + 1, NullDescriptorDetails);
+          contents->set_null_unchecked(heap, i);
+        }
       }
     }
   }
@@ -6337,6 +7110,57 @@
 }
 
 
+bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
+                                        ClearExceptionFlag flag) {
+  return shared->is_compiled() || CompileLazy(shared, flag);
+}
+
+
+static bool CompileLazyHelper(CompilationInfo* info,
+                              ClearExceptionFlag flag) {
+  // Compile the source information to a code object.
+  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
+  ASSERT(!info->isolate()->has_pending_exception());
+  bool result = Compiler::CompileLazy(info);
+  ASSERT(result != Isolate::Current()->has_pending_exception());
+  if (!result && flag == CLEAR_EXCEPTION) {
+    info->isolate()->clear_pending_exception();
+  }
+  return result;
+}
+
+
+bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
+                                     ClearExceptionFlag flag) {
+  CompilationInfo info(shared);
+  return CompileLazyHelper(&info, flag);
+}
+
+
+bool JSFunction::CompileLazy(Handle<JSFunction> function,
+                             ClearExceptionFlag flag) {
+  bool result = true;
+  if (function->shared()->is_compiled()) {
+    function->ReplaceCode(function->shared()->code());
+    function->shared()->set_code_age(0);
+  } else {
+    CompilationInfo info(function);
+    result = CompileLazyHelper(&info, flag);
+    ASSERT(!result || function->is_compiled());
+  }
+  return result;
+}
+
+
+bool JSFunction::CompileOptimized(Handle<JSFunction> function,
+                                  int osr_ast_id,
+                                  ClearExceptionFlag flag) {
+  CompilationInfo info(function);
+  info.SetOptimizing(osr_ast_id);
+  return CompileLazyHelper(&info, flag);
+}
+
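// Typical call shape for the helpers above (an assumed call site, for
// illustration only):
static bool EnsureCompiledOrPropagate(Handle<SharedFunctionInfo> shared) {
  // KEEP_EXCEPTION leaves any pending exception in place so the caller can
  // propagate it; CLEAR_EXCEPTION would swallow it instead.
  return SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION);
}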
+
 bool JSFunction::IsInlineable() {
   if (IsBuiltin()) return false;
   SharedFunctionInfo* shared_info = shared();
@@ -6384,7 +7208,7 @@
       if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
     }
     Map* new_map = Map::cast(new_object);
-    Heap* heap = new_map->heap();
+    Heap* heap = new_map->GetHeap();
     set_map(new_map);
     new_map->set_constructor(value);
     new_map->set_non_instance_prototype(true);
@@ -6401,21 +7225,21 @@
 
 Object* JSFunction::RemovePrototype() {
   Context* global_context = context()->global_context();
-  Map* no_prototype_map = shared()->strict_mode()
-      ? global_context->strict_mode_function_without_prototype_map()
-      : global_context->function_without_prototype_map();
+  Map* no_prototype_map = shared()->is_classic_mode()
+      ? global_context->function_without_prototype_map()
+      : global_context->strict_mode_function_without_prototype_map();
 
   if (map() == no_prototype_map) {
     // Be idempotent.
     return this;
   }
 
-  ASSERT(!shared()->strict_mode() ||
-         map() == global_context->strict_mode_function_map());
-  ASSERT(shared()->strict_mode() || map() == global_context->function_map());
+  ASSERT(map() == (shared()->is_classic_mode()
+                   ? global_context->function_map()
+                   : global_context->strict_mode_function_map()));
 
   set_map(no_prototype_map);
-  set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
+  set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
   return this;
 }
 
@@ -6519,7 +7343,7 @@
        obj = obj->GetPrototype()) {
     JSObject* js_object = JSObject::cast(obj);
     for (int i = 0; i < this_property_assignments_count(); i++) {
-      LookupResult result;
+      LookupResult result(heap->isolate());
       String* name = GetThisPropertyAssignmentName(i);
       js_object->LocalLookupRealNamedProperty(name, &result);
       if (result.IsProperty() && result.type() == CALLBACKS) {
@@ -6708,6 +7532,8 @@
 void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
   ASSERT(!IsInobjectSlackTrackingInProgress());
 
+  if (!FLAG_clever_optimizations) return;
+
   // Only initiate the tracking the first time.
   if (live_objects_may_exist()) return;
   set_live_objects_may_exist(true);
@@ -6723,7 +7549,7 @@
     set_construction_count(kGenerousAllocationCount);
   }
   set_initial_map(map);
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Builtins* builtins = map->GetHeap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             construct_stub());
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6743,8 +7569,9 @@
   // then StartInobjectTracking will be called again the next time the
   // constructor is called. The countdown will continue and (possibly after
   // several more GCs) CompleteInobjectSlackTracking will eventually be called.
-  set_initial_map(map->heap()->raw_unchecked_undefined_value());
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Heap* heap = map->GetHeap();
+  set_initial_map(heap->raw_unchecked_undefined_value());
+  Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
@@ -6760,7 +7587,7 @@
 
   // Resume inobject slack tracking.
   set_initial_map(map);
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Builtins* builtins = map->GetHeap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6792,7 +7619,7 @@
   ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
   Map* map = Map::cast(initial_map());
 
-  Heap* heap = map->heap();
+  Heap* heap = map->GetHeap();
   set_initial_map(heap->undefined_value());
   Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
@@ -6854,8 +7681,18 @@
 }
 
 
+void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
+  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+  VisitPointer(rinfo->target_object_address());
+}
+
+
+void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
+  Address* p = rinfo->target_reference_address();
+  VisitExternalReferences(p, p + 1);
+}
+
+
 void Code::InvalidateRelocation() {
-  set_relocation_info(heap()->empty_byte_array());
+  set_relocation_info(GetHeap()->empty_byte_array());
 }
 
 
@@ -6868,6 +7705,8 @@
 
 
 void Code::CopyFrom(const CodeDesc& desc) {
+  ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
+
   // copy code
   memmove(instruction_start(), desc.buffer, desc.instr_size);
 
@@ -6887,16 +7726,17 @@
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
-      it.rinfo()->set_target_object(*p);
+      it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
-      it.rinfo()->set_target_cell(*cell);
+      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+      it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       Code* code = Code::cast(*p);
-      it.rinfo()->set_target_address(code->instruction_start());
+      it.rinfo()->set_target_address(code->instruction_start(),
+                                     SKIP_WRITE_BARRIER);
     } else {
       it.rinfo()->apply(delta);
     }
@@ -7163,7 +8003,7 @@
     case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
     case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
   }
-  UNREACHABLE();
+  UNREACHABLE();  // Keep the compiler happy.
   return NULL;
 }
 
@@ -7282,7 +8122,7 @@
 }
 
 
-static void CopySlowElementsToFast(SeededNumberDictionary* source,
+static void CopySlowElementsToFast(NumberDictionary* source,
                                    FixedArray* destination,
                                    WriteBarrierMode mode) {
   for (int i = 0; i < source->Capacity(); ++i) {
@@ -7295,8 +8135,10 @@
 }
 
 
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
-                                                        int length) {
+MaybeObject* JSObject::SetFastElementsCapacityAndLength(
+    int capacity,
+    int length,
+    SetFastElementsCapacityMode set_capacity_mode) {
   Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
@@ -7313,16 +8155,27 @@
   Map* new_map = NULL;
   if (elements()->map() != heap->non_strict_arguments_elements_map()) {
     Object* object;
-    MaybeObject* maybe = map()->GetFastElementsMap();
+    bool has_fast_smi_only_elements =
+        (set_capacity_mode == kAllowSmiOnlyElements) &&
+        (elements()->map()->has_fast_smi_only_elements() ||
+         elements() == heap->empty_fixed_array());
+    ElementsKind elements_kind = has_fast_smi_only_elements
+        ? FAST_SMI_ONLY_ELEMENTS
+        : FAST_ELEMENTS;
+    MaybeObject* maybe = GetElementsTransitionMap(elements_kind);
     if (!maybe->ToObject(&object)) return maybe;
     new_map = Map::cast(object);
   }
 
-  switch (GetElementsKind()) {
+  FixedArrayBase* old_elements_raw = elements();
+  ElementsKind elements_kind = GetElementsKind();
+  switch (elements_kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       AssertNoAllocation no_gc;
-      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
-      CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
+      WriteBarrierMode mode(new_elements->GetWriteBarrierMode(no_gc));
+      CopyFastElementsToFast(FixedArray::cast(old_elements_raw),
+                             new_elements, mode);
       set_map(new_map);
       set_elements(new_elements);
       break;
@@ -7330,7 +8183,7 @@
     case DICTIONARY_ELEMENTS: {
       AssertNoAllocation no_gc;
       WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
-      CopySlowElementsToFast(SeededNumberDictionary::cast(elements()),
+      CopySlowElementsToFast(NumberDictionary::cast(old_elements_raw),
                              new_elements,
                              mode);
       set_map(new_map);
@@ -7342,10 +8195,10 @@
       WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
       // The object's map and the parameter map are unchanged, the unaliased
       // arguments are copied to the new backing store.
-      FixedArray* parameter_map = FixedArray::cast(elements());
+      FixedArray* parameter_map = FixedArray::cast(old_elements_raw);
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
       if (arguments->IsDictionary()) {
-        CopySlowElementsToFast(SeededNumberDictionary::cast(arguments),
+        CopySlowElementsToFast(NumberDictionary::cast(arguments),
                                new_elements,
                                mode);
       } else {
@@ -7355,7 +8208,7 @@
       break;
     }
     case FAST_DOUBLE_ELEMENTS: {
-      FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
+      FixedDoubleArray* old_elements = FixedDoubleArray::cast(old_elements_raw);
       uint32_t old_length = static_cast<uint32_t>(old_elements->length());
       // Fill out the new array with this content and array holes.
       for (uint32_t i = 0; i < old_length; i++) {
@@ -7393,6 +8246,11 @@
       break;
   }
 
+  if (FLAG_trace_elements_transitions) {
+    PrintElementsTransition(stdout, elements_kind, old_elements_raw,
+                            FAST_ELEMENTS, new_elements);
+  }
+
   // Update the length if necessary.
   if (IsJSArray()) {
     JSArray::cast(this)->set_length(Smi::FromInt(length));
@@ -7416,23 +8274,27 @@
   }
   FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
 
-  { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
+  { MaybeObject* maybe_obj =
+        GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
 
+  FixedArrayBase* old_elements = elements();
+  ElementsKind elements_kind(GetElementsKind());
   AssertNoAllocation no_gc;
-  switch (GetElementsKind()) {
+  switch (elements_kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
-      elems->Initialize(FixedArray::cast(elements()));
+      elems->Initialize(FixedArray::cast(old_elements));
       break;
     }
     case FAST_DOUBLE_ELEMENTS: {
-      elems->Initialize(FixedDoubleArray::cast(elements()));
+      elems->Initialize(FixedDoubleArray::cast(old_elements));
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      elems->Initialize(SeededNumberDictionary::cast(elements()));
+      elems->Initialize(NumberDictionary::cast(old_elements));
       break;
     }
     default:
@@ -7440,6 +8302,11 @@
       break;
   }
 
+  if (FLAG_trace_elements_transitions) {
+    PrintElementsTransition(stdout, elements_kind, old_elements,
+                            FAST_DOUBLE_ELEMENTS, elems);
+  }
+
   ASSERT(new_map->has_fast_double_elements());
   set_map(new_map);
   ASSERT(elems->IsFixedDoubleArray());
@@ -7453,53 +8320,6 @@
 }
 
 
-MaybeObject* JSObject::SetSlowElements(Object* len) {
-  // We should never end in here with a pixel or external array.
-  ASSERT(!HasExternalArrayElements());
-
-  uint32_t new_length = static_cast<uint32_t>(len->Number());
-
-  switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-    case FAST_DOUBLE_ELEMENTS:
-      // Make sure we never try to shrink dense arrays into sparse arrays.
-      ASSERT(static_cast<uint32_t>(
-          FixedArrayBase::cast(elements())->length()) <= new_length);
-      MaybeObject* result = NormalizeElements();
-      if (result->IsFailure()) return result;
-
-      // Update length for JSArrays.
-      if (IsJSArray()) JSArray::cast(this)->set_length(len);
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      if (IsJSArray()) {
-        uint32_t old_length =
-            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
-        element_dictionary()->RemoveNumberEntries(new_length, old_length),
-        JSArray::cast(this)->set_length(len);
-      }
-      break;
-    }
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNIMPLEMENTED();
-      break;
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-  return this;
-}
-
-
 MaybeObject* JSArray::Initialize(int capacity) {
   Heap* heap = GetHeap();
   ASSERT(capacity >= 0);
@@ -7527,155 +8347,14 @@
   Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
   // Can't use this any more now because we may have had a GC!
   for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
-  self->SetContent(*new_backing);
-}
-
-
-static Failure* ArrayLengthRangeError(Heap* heap) {
-  HandleScope scope(heap->isolate());
-  return heap->isolate()->Throw(
-      *FACTORY->NewRangeError("invalid_array_length",
-          HandleVector<Object>(NULL, 0)));
+  GetIsolate()->factory()->SetContent(self, new_backing);
 }
 
 
 MaybeObject* JSObject::SetElementsLength(Object* len) {
   // We should never end in here with a pixel or external array.
   ASSERT(AllowsSetElementsLength());
-
-  MaybeObject* maybe_smi_length = len->ToSmi();
-  Object* smi_length = Smi::FromInt(0);
-  if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
-    const int value = Smi::cast(smi_length)->value();
-    if (value < 0) return ArrayLengthRangeError(GetHeap());
-    ElementsKind elements_kind = GetElementsKind();
-    switch (elements_kind) {
-      case FAST_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS: {
-        int old_capacity = FixedArrayBase::cast(elements())->length();
-        if (value <= old_capacity) {
-          if (IsJSArray()) {
-            Object* obj;
-            if (elements_kind == FAST_ELEMENTS) {
-              MaybeObject* maybe_obj = EnsureWritableFastElements();
-              if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-            }
-            if (2 * value <= old_capacity) {
-              // If more than half the elements won't be used, trim the array.
-              if (value == 0) {
-                initialize_elements();
-              } else {
-                Address filler_start;
-                int filler_size;
-                if (GetElementsKind() == FAST_ELEMENTS) {
-                  FixedArray* fast_elements = FixedArray::cast(elements());
-                  fast_elements->set_length(value);
-                  filler_start = fast_elements->address() +
-                      FixedArray::OffsetOfElementAt(value);
-                  filler_size = (old_capacity - value) * kPointerSize;
-                } else {
-                  ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-                  FixedDoubleArray* fast_double_elements =
-                      FixedDoubleArray::cast(elements());
-                  fast_double_elements->set_length(value);
-                  filler_start = fast_double_elements->address() +
-                      FixedDoubleArray::OffsetOfElementAt(value);
-                  filler_size = (old_capacity - value) * kDoubleSize;
-                }
-                GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
-              }
-            } else {
-              // Otherwise, fill the unused tail with holes.
-              int old_length = FastD2I(JSArray::cast(this)->length()->Number());
-              if (GetElementsKind() == FAST_ELEMENTS) {
-                FixedArray* fast_elements = FixedArray::cast(elements());
-                for (int i = value; i < old_length; i++) {
-                  fast_elements->set_the_hole(i);
-                }
-              } else {
-                ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-                FixedDoubleArray* fast_double_elements =
-                    FixedDoubleArray::cast(elements());
-                for (int i = value; i < old_length; i++) {
-                  fast_double_elements->set_the_hole(i);
-                }
-              }
-            }
-            JSArray::cast(this)->set_length(Smi::cast(smi_length));
-          }
-          return this;
-        }
-        int min = NewElementsCapacity(old_capacity);
-        int new_capacity = value > min ? value : min;
-        if (!ShouldConvertToSlowElements(new_capacity)) {
-          MaybeObject* result;
-          if (GetElementsKind() == FAST_ELEMENTS) {
-            result = SetFastElementsCapacityAndLength(new_capacity, value);
-          }  else {
-            ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-            result = SetFastDoubleElementsCapacityAndLength(new_capacity,
-                                                            value);
-          }
-          if (result->IsFailure()) return result;
-          return this;
-        }
-        break;
-      }
-      case DICTIONARY_ELEMENTS: {
-        if (IsJSArray()) {
-          if (value == 0) {
-            // If the length of a slow array is reset to zero, we clear
-            // the array and flush backing storage. This has the added
-            // benefit that the array returns to fast mode.
-            Object* obj;
-            { MaybeObject* maybe_obj = ResetElements();
-              if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-            }
-          } else {
-            // Remove deleted elements.
-            uint32_t old_length =
-            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
-            element_dictionary()->RemoveNumberEntries(value, old_length);
-          }
-          JSArray::cast(this)->set_length(Smi::cast(smi_length));
-        }
-        return this;
-      }
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-      case EXTERNAL_PIXEL_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-  }
-
-  // General slow case.
-  if (len->IsNumber()) {
-    uint32_t length;
-    if (len->ToArrayIndex(&length)) {
-      return SetSlowElements(len);
-    } else {
-      return ArrayLengthRangeError(GetHeap());
-    }
-  }
-
-  // len is not a number so make the array size one and
-  // set only element to len.
-  Object* obj;
-  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  FixedArray::cast(obj)->set(0, len);
-  if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
-  set_elements(FixedArray::cast(obj));
-  return this;
+  return GetElementsAccessor()->SetLength(this, len);
 }
 
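// SetElementsLength now delegates to a per-ElementsKind strategy object.
// A sketch of the assumed interface (the concrete accessors live in
// elements.h/elements.cc; this is not the exact declaration):
//
//   class ElementsAccessor {
//    public:
//     virtual MaybeObject* SetLength(JSObject* holder, Object* length) = 0;
//     static ElementsAccessor* ForKind(ElementsKind kind);
//   };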
 
@@ -7718,7 +8397,7 @@
     FixedArray* new_cache;
     // Grow array by factor 2 over and above what we need.
     { MaybeObject* maybe_cache =
-          heap()->AllocateFixedArray(transitions * 2 * step + header);
+          GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
       if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
     }
 
@@ -7771,7 +8450,7 @@
   // It is sufficient to validate that the receiver is not in the new prototype
   // chain.
   for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
-    if (JSObject::cast(pt) == this) {
+    if (JSReceiver::cast(pt) == this) {
       // Cycle detected.
       HandleScope scope(heap->isolate());
       return heap->isolate()->Throw(
@@ -7786,8 +8465,8 @@
     // hidden and set the new prototype on that object.
     Object* current_proto = real_receiver->GetPrototype();
     while (current_proto->IsJSObject() &&
-          JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
-      real_receiver = JSObject::cast(current_proto);
+          JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
+      real_receiver = JSReceiver::cast(current_proto);
       current_proto = current_proto->GetPrototype();
     }
   }
@@ -7820,8 +8499,21 @@
 }
 
 
+MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
+                                                uint32_t first_arg,
+                                                uint32_t arg_count) {
+  // Elements in |Arguments| are ordered backwards (because they're on the
+  // stack), but the method called here iterates over them in the forward
+  // direction.
+  return EnsureCanContainElements(
+      args->arguments() - first_arg - (arg_count - 1),
+      arg_count);
+}
+
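// Worked example of the pointer arithmetic above (illustrative): arguments
// live at descending addresses, so with first_arg == 1 and arg_count == 3
// the base pointer is args->arguments() - 1 - 2, the lowest slot of the
// three. A forward scan over [base, base + 3) then visits exactly the
// requested arguments, in reverse push order, which is harmless here
// because the callee only inspects each element.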
+
 bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -7867,7 +8559,7 @@
     }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
-          != SeededNumberDictionary::kNotFound) {
+          != NumberDictionary::kNotFound) {
         return true;
       }
       break;
@@ -7882,6 +8574,11 @@
 
   Object* pt = GetPrototype();
   if (pt->IsNull()) return false;
+  if (pt->IsJSProxy()) {
+    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+        receiver, index) != ABSENT;
+  }
   return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
@@ -7958,6 +8655,7 @@
   }
 
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -7999,7 +8697,7 @@
     }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index) !=
-          SeededNumberDictionary::kNotFound) {
+          NumberDictionary::kNotFound) {
         return DICTIONARY_ELEMENT;
       }
       break;
@@ -8016,9 +8714,8 @@
       // If not aliased, check the arguments.
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
       if (arguments->IsDictionary()) {
-        SeededNumberDictionary* dictionary =
-            SeededNumberDictionary::cast(arguments);
-        if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
+        NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+        if (dictionary->FindEntry(index) != NumberDictionary::kNotFound) {
           return DICTIONARY_ELEMENT;
         }
       } else {
@@ -8047,8 +8744,8 @@
       return true;
     }
   } else {
-    if (SeededNumberDictionary::cast(elements)->FindEntry(index) !=
-        SeededNumberDictionary::kNotFound) {
+    if (NumberDictionary::cast(elements)->FindEntry(index) !=
+        NumberDictionary::kNotFound) {
       return true;
     }
   }
@@ -8073,6 +8770,7 @@
 
   ElementsKind kind = GetElementsKind();
   switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -8114,7 +8812,7 @@
     }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
-          != SeededNumberDictionary::kNotFound) {
+          != NumberDictionary::kNotFound) {
         return true;
       }
       break;
@@ -8139,6 +8837,11 @@
 
   Object* pt = GetPrototype();
   if (pt->IsNull()) return false;
+  if (pt->IsJSProxy()) {
+    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+        receiver, index) != ABSENT;
+  }
   return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
@@ -8215,9 +8918,9 @@
   // __defineGetter__ callback
   if (structure->IsFixedArray()) {
     Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
-    if (getter->IsJSFunction()) {
-      return Object::GetPropertyWithDefinedGetter(receiver,
-                                                  JSFunction::cast(getter));
+    if (getter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -8272,8 +8975,9 @@
 
   if (structure->IsFixedArray()) {
     Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
-    if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
+    if (setter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -8323,7 +9027,8 @@
                                       Object* value,
                                       StrictModeFlag strict_mode,
                                       bool check_prototype) {
-  ASSERT(HasFastElements() || HasFastArgumentsElements());
+  ASSERT(HasFastTypeElements() || HasFastArgumentsElements());
 
   FixedArray* backing_store = FixedArray::cast(elements());
   if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
@@ -8334,10 +9039,10 @@
     if (!maybe->ToObject(&writable)) return maybe;
     backing_store = FixedArray::cast(writable);
   }
-  uint32_t length = static_cast<uint32_t>(backing_store->length());
+  uint32_t capacity = static_cast<uint32_t>(backing_store->length());
 
   if (check_prototype &&
-      (index >= length || backing_store->get(index)->IsTheHole())) {
+      (index >= capacity || backing_store->get(index)->IsTheHole())) {
     bool found;
     MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
                                                                    value,
@@ -8346,39 +9051,75 @@
     if (found) return result;
   }
 
-  // Check whether there is extra space in fixed array.
-  if (index < length) {
-    backing_store->set(index, value);
-    if (IsJSArray()) {
-      // Update the length of the array if needed.
-      uint32_t array_length = 0;
-      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
-      if (index >= array_length) {
-        JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+  uint32_t new_capacity = capacity;
+  // Check if the length property of this object needs to be updated.
+  uint32_t array_length = 0;
+  bool must_update_array_length = false;
+  if (IsJSArray()) {
+    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+    if (index >= array_length) {
+      must_update_array_length = true;
+      array_length = index + 1;
+    }
+  }
+  // Check if the capacity of the backing store needs to be increased, or if
+  // a transition to slow elements is necessary.
+  if (index >= capacity) {
+    bool convert_to_slow = true;
+    if ((index - capacity) < kMaxGap) {
+      new_capacity = NewElementsCapacity(index + 1);
+      ASSERT(new_capacity > index);
+      if (!ShouldConvertToSlowElements(new_capacity)) {
+        convert_to_slow = false;
       }
     }
-    return value;
-  }
-
-  // Allow gap in fast case.
-  if ((index - length) < kMaxGap) {
-    // Try allocating extra space.
-    int new_capacity = NewElementsCapacity(index + 1);
-    if (!ShouldConvertToSlowElements(new_capacity)) {
-      ASSERT(static_cast<uint32_t>(new_capacity) > index);
-      Object* new_elements;
-      MaybeObject* maybe =
-          SetFastElementsCapacityAndLength(new_capacity, index + 1);
-      if (!maybe->ToObject(&new_elements)) return maybe;
-      FixedArray::cast(new_elements)->set(index, value);
-      return value;
+    if (convert_to_slow) {
+      MaybeObject* result = NormalizeElements();
+      if (result->IsFailure()) return result;
+      return SetDictionaryElement(index, value, strict_mode, check_prototype);
     }
   }
-
-  // Otherwise default to slow case.
-  MaybeObject* result = NormalizeElements();
-  if (result->IsFailure()) return result;
-  return SetDictionaryElement(index, value, strict_mode, check_prototype);
+  // Convert to fast double elements if appropriate.
+  if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
+    MaybeObject* maybe =
+        SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
+    if (maybe->IsFailure()) return maybe;
+    FixedDoubleArray::cast(elements())->set(index, value->Number());
+    return value;
+  }
+  // Change elements kind from SMI_ONLY to generic FAST if necessary.
+  if (HasFastSmiOnlyElements() && !value->IsSmi()) {
+    MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
+    Map* new_map;
+    if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
+    set_map(new_map);
+    if (FLAG_trace_elements_transitions) {
+      PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
+                              FAST_ELEMENTS, elements());
+    }
+  }
+  // Increase backing store capacity if that's been decided previously.
+  if (new_capacity != capacity) {
+    Object* new_elements;
+    SetFastElementsCapacityMode set_capacity_mode =
+        value->IsSmi() && HasFastSmiOnlyElements()
+            ? kAllowSmiOnlyElements
+            : kDontAllowSmiOnlyElements;
+    MaybeObject* maybe =
+        SetFastElementsCapacityAndLength(new_capacity,
+                                         array_length,
+                                         set_capacity_mode);
+    if (!maybe->ToObject(&new_elements)) return maybe;
+    FixedArray::cast(new_elements)->set(index, value);
+    return value;
+  }
+  // Finally, set the new element and length.
+  ASSERT(elements()->IsFixedArray());
+  backing_store->set(index, value);
+  if (must_update_array_length) {
+    JSArray::cast(this)->set_length(Smi::FromInt(array_length));
+  }
+  return value;
 }
 
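// Capacity growth used above (a sketch; NewElementsCapacity is assumed to
// implement the usual "old + 50% + slack" policy from objects.h):
static inline int NewElementsCapacitySketch(int old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;  // (old + 50%) + 16.
}
// A store at index capacity + k with k < kMaxGap therefore grows the
// backing store geometrically rather than one element at a time.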
 
@@ -8394,15 +9135,15 @@
   FixedArray* elements = FixedArray::cast(this->elements());
   bool is_arguments =
       (elements->map() == heap->non_strict_arguments_elements_map());
-  SeededNumberDictionary* dictionary = NULL;
+  NumberDictionary* dictionary = NULL;
   if (is_arguments) {
-    dictionary = SeededNumberDictionary::cast(elements->get(1));
+    dictionary = NumberDictionary::cast(elements->get(1));
   } else {
-    dictionary = SeededNumberDictionary::cast(elements);
+    dictionary = NumberDictionary::cast(elements);
   }
 
   int entry = dictionary->FindEntry(index);
-  if (entry != SeededNumberDictionary::kNotFound) {
+  if (entry != NumberDictionary::kNotFound) {
     Object* element = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
     if (details.type() == CALLBACKS) {
@@ -8447,13 +9188,13 @@
     FixedArrayBase* new_dictionary;
     MaybeObject* maybe = dictionary->AtNumberPut(index, value);
     if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
-    if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+    if (dictionary != NumberDictionary::cast(new_dictionary)) {
       if (is_arguments) {
         elements->set(1, new_dictionary);
       } else {
         set_elements(new_dictionary);
       }
-      dictionary = SeededNumberDictionary::cast(new_dictionary);
+      dictionary = NumberDictionary::cast(new_dictionary);
     }
   }
 
@@ -8474,7 +9215,9 @@
     }
     MaybeObject* result = CanConvertToFastDoubleElements()
         ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
-        : SetFastElementsCapacityAndLength(new_length, new_length);
+        : SetFastElementsCapacityAndLength(new_length,
+                                           new_length,
+                                           kDontAllowSmiOnlyElements);
     if (result->IsFailure()) return result;
 #ifdef DEBUG
     if (FLAG_trace_normalization) {
@@ -8518,10 +9261,15 @@
     if (IsJSArray()) {
       CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
     }
-    MaybeObject* maybe_obj =
-        SetFastElementsCapacityAndLength(elms_length, length);
+    MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
+        elms_length,
+        length,
+        kDontAllowSmiOnlyElements);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    return SetFastElement(index, value, strict_mode, check_prototype);
+    return SetFastElement(index,
+                          value,
+                          strict_mode,
+                          check_prototype);
   }
 
   double double_value = value_is_smi
@@ -8572,6 +9320,17 @@
 }
 
 
+MaybeObject* JSReceiver::SetElement(uint32_t index,
+                                    Object* value,
+                                    StrictModeFlag strict_mode,
+                                    bool check_proto) {
+  return IsJSProxy()
+      ? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode)
+      : JSObject::cast(this)->SetElement(index, value, strict_mode,
+                                         check_proto);
+}
+
+
 MaybeObject* JSObject::SetElement(uint32_t index,
                                   Object* value,
                                   StrictModeFlag strict_mode,
@@ -8618,6 +9377,7 @@
                                                     bool check_prototype) {
   Isolate* isolate = GetIsolate();
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       return SetFastElement(index, value, strict_mode, check_prototype);
     case FAST_DOUBLE_ELEMENTS:
@@ -8693,6 +9453,54 @@
 }
 
 
+MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
+    ElementsKind to_kind) {
+  ElementsKind from_kind = map()->elements_kind();
+  FixedArrayBase* elms = FixedArrayBase::cast(elements());
+  uint32_t capacity = static_cast<uint32_t>(elms->length());
+  uint32_t length = capacity;
+  if (IsJSArray()) {
+    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+  }
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS) {
+    if (to_kind == FAST_DOUBLE_ELEMENTS) {
+      MaybeObject* maybe_result =
+          SetFastDoubleElementsCapacityAndLength(capacity, length);
+      if (maybe_result->IsFailure()) return maybe_result;
+      return this;
+    } else if (to_kind == FAST_ELEMENTS) {
+      MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
+      Map* new_map;
+      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+      if (FLAG_trace_elements_transitions) {
+        PrintElementsTransition(stdout, from_kind, elms, FAST_ELEMENTS, elms);
+      }
+      set_map(new_map);
+      return this;
+    }
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
+        capacity, length, kDontAllowSmiOnlyElements);
+    if (maybe_result->IsFailure()) return maybe_result;
+    return this;
+  }
+  // This method should never be called for any other case than the ones
+  // handled above.
+  UNREACHABLE();
+  return GetIsolate()->heap()->null_value();
+}
+
+
+// static
+bool Map::IsValidElementsTransition(ElementsKind from_kind,
+                                    ElementsKind to_kind) {
+  return
+      (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+          (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
+      (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
+}
+
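// The one-way lattice encoded above, spelled out (illustrative):
//
//   FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS          valid
//   FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS   valid
//   FAST_DOUBLE_ELEMENTS   -> FAST_ELEMENTS          valid
//   any other pair                                   invalid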
+
 MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
                                                    Object* value) {
   uint32_t old_len = 0;
@@ -8774,13 +9582,13 @@
           FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
       backing_store = FixedArray::cast(backing_store_base);
       if (backing_store->IsDictionary()) {
-        SeededNumberDictionary* dictionary =
-            SeededNumberDictionary::cast(backing_store);
+        NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
         *capacity = dictionary->Capacity();
         *used = dictionary->NumberOfElements();
         break;
       }
       // Fall through.
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       backing_store = FixedArray::cast(backing_store_base);
       *capacity = backing_store->length();
@@ -8789,8 +9597,8 @@
       }
       break;
     case DICTIONARY_ELEMENTS: {
-      SeededNumberDictionary* dictionary =
-          SeededNumberDictionary::cast(FixedArray::cast(elements()));
+      NumberDictionary* dictionary =
+          NumberDictionary::cast(FixedArray::cast(elements()));
       *capacity = dictionary->Capacity();
       *used = dictionary->NumberOfElements();
       break;
@@ -8835,8 +9643,8 @@
   int old_capacity = 0;
   int used_elements = 0;
   GetElementsCapacityAndUsage(&old_capacity, &used_elements);
-  int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
-      SeededNumberDictionary::kEntrySize;
+  int dictionary_size = NumberDictionary::ComputeCapacity(used_elements) *
+      NumberDictionary::kEntrySize;
   return 3 * dictionary_size <= new_capacity;
 }
 
@@ -8850,11 +9658,11 @@
   if (IsAccessCheckNeeded()) return false;
 
   FixedArray* elements = FixedArray::cast(this->elements());
-  SeededNumberDictionary* dictionary = NULL;
+  NumberDictionary* dictionary = NULL;
   if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
-    dictionary = SeededNumberDictionary::cast(elements->get(1));
+    dictionary = NumberDictionary::cast(elements->get(1));
   } else {
-    dictionary = SeededNumberDictionary::cast(elements);
+    dictionary = NumberDictionary::cast(elements);
   }
   // If an element has been added at a very high index in the elements
   // dictionary, we cannot go back to fast case.
@@ -8869,7 +9677,7 @@
     array_size = dictionary->max_number_key();
   }
   uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
-      SeededNumberDictionary::kEntrySize;
+      NumberDictionary::kEntrySize;
   return 2 * dictionary_size >= array_size;
 }
 
@@ -8877,8 +9685,7 @@
 bool JSObject::CanConvertToFastDoubleElements() {
   if (FLAG_unbox_double_arrays) {
     ASSERT(HasDictionaryElements());
-    SeededNumberDictionary* dictionary =
-        SeededNumberDictionary::cast(elements());
+    NumberDictionary* dictionary = NumberDictionary::cast(elements());
     for (int i = 0; i < dictionary->Capacity(); i++) {
       Object* key = dictionary->KeyAt(i);
       if (key->IsNumber()) {
@@ -8960,7 +9767,7 @@
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
@@ -8978,7 +9785,7 @@
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
-  LookupResult result;
+  LookupResult result(GetIsolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
@@ -9029,15 +9836,15 @@
 
 bool JSObject::HasRealNamedProperty(String* key) {
   // Check access rights if needed.
+  Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  LookupResult result;
+  LookupResult result(isolate);
   LocalLookupRealNamedProperty(key, &result);
   return result.IsProperty() && (result.type() != INTERCEPTOR);
 }
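
Several hunks in this region make the same mechanical change: LookupResult now takes the Isolate in its constructor instead of the callee digging it out via GetHeap()->isolate() on every access check. A minimal sketch of the pattern with stand-in types (the real classes are far richer):

    #include <cstdio>

    class Isolate { /* per-VM state */ };

    class LookupResult {
     public:
      // The diff's key change: bind the isolate at construction time.
      explicit LookupResult(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };

    int main() {
      Isolate isolate;
      LookupResult result(&isolate);  // was: LookupResult result;
      std::printf("bound to %p\n", static_cast<void*>(result.isolate()));
      return 0;
    }
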
@@ -9057,6 +9864,7 @@
   if (this->IsStringObjectWithCharacterAt(index)) return true;
 
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>(
@@ -9091,7 +9899,7 @@
     }
     case DICTIONARY_ELEMENTS: {
       return element_dictionary()->FindEntry(index)
-          != SeededNumberDictionary::kNotFound;
+          != NumberDictionary::kNotFound;
     }
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNIMPLEMENTED();
@@ -9105,15 +9913,15 @@
 
 bool JSObject::HasRealNamedCallbackProperty(String* key) {
   // Check access rights if needed.
+  Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded()) {
-    Heap* heap = GetHeap();
-    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  LookupResult result;
+  LookupResult result(isolate);
   LocalLookupRealNamedProperty(key, &result);
   return result.IsProperty() && (result.type() == CALLBACKS);
 }
@@ -9147,8 +9955,8 @@
   set(j, temp);
   if (this != numbers) {
     temp = numbers->get(i);
-    numbers->set(i, numbers->get(j));
-    numbers->set(j, temp);
+    numbers->set(i, Smi::cast(numbers->get(j)));
+    numbers->set(j, Smi::cast(temp));
   }
 }
 
@@ -9296,6 +10104,7 @@
                                   PropertyAttributes filter) {
   int counter = 0;
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       int length = IsJSArray() ?
           Smi::cast(JSArray::cast(this)->length())->value() :
@@ -9359,7 +10168,7 @@
       if (storage != NULL) {
         element_dictionary()->CopyKeysTo(storage,
                                          filter,
-                                         SeededNumberDictionary::SORTED);
+                                         NumberDictionary::SORTED);
       }
       counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
       break;
@@ -9371,11 +10180,9 @@
       if (arguments->IsDictionary()) {
         // Copy the keys from arguments first, because Dictionary::CopyKeysTo
         // will insert in storage starting at index 0.
-        SeededNumberDictionary* dictionary =
-            SeededNumberDictionary::cast(arguments);
+        NumberDictionary* dictionary = NumberDictionary::cast(arguments);
         if (storage != NULL) {
-          dictionary->CopyKeysTo(
-              storage, filter, SeededNumberDictionary::UNSORTED);
+          dictionary->CopyKeysTo(storage, filter, NumberDictionary::UNSORTED);
         }
         counter += dictionary->NumberOfElementsFilterAttributes(filter);
         for (int i = 0; i < mapped_length; ++i) {
@@ -9462,70 +10269,87 @@
  public:
   StringSharedKey(String* source,
                   SharedFunctionInfo* shared,
-                  StrictModeFlag strict_mode)
+                  LanguageMode language_mode,
+                  int scope_position)
       : source_(source),
         shared_(shared),
-        strict_mode_(strict_mode) { }
+        language_mode_(language_mode),
+        scope_position_(scope_position) { }
 
   bool IsMatch(Object* other) {
     if (!other->IsFixedArray()) return false;
-    FixedArray* pair = FixedArray::cast(other);
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
+    FixedArray* other_array = FixedArray::cast(other);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
     if (shared != shared_) return false;
-    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
-        Smi::cast(pair->get(2))->value());
-    if (strict_mode != strict_mode_) return false;
-    String* source = String::cast(pair->get(1));
+    int language_unchecked = Smi::cast(other_array->get(2))->value();
+    ASSERT(language_unchecked == CLASSIC_MODE ||
+           language_unchecked == STRICT_MODE ||
+           language_unchecked == EXTENDED_MODE);
+    LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+    if (language_mode != language_mode_) return false;
+    int scope_position = Smi::cast(other_array->get(3))->value();
+    if (scope_position != scope_position_) return false;
+    String* source = String::cast(other_array->get(1));
     return source->Equals(source_);
   }
 
   static uint32_t StringSharedHashHelper(String* source,
                                          SharedFunctionInfo* shared,
-                                         StrictModeFlag strict_mode) {
+                                         LanguageMode language_mode,
+                                         int scope_position) {
     uint32_t hash = source->Hash();
     if (shared->HasSourceCode()) {
       // Instead of using the SharedFunctionInfo pointer in the hash
       // code computation, we use a combination of the hash of the
-      // script source code and the start and end positions.  We do
-      // this to ensure that the cache entries can survive garbage
+      // script source code and the start position of the calling scope.
+      // We do this to ensure that the cache entries can survive garbage
       // collection.
       Script* script = Script::cast(shared->script());
       hash ^= String::cast(script->source())->Hash();
-      if (strict_mode == kStrictMode) hash ^= 0x8000;
-      hash += shared->start_position();
+      if (language_mode == STRICT_MODE) hash ^= 0x8000;
+      if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
+      hash += scope_position;
     }
     return hash;
   }
 
   uint32_t Hash() {
-    return StringSharedHashHelper(source_, shared_, strict_mode_);
+    return StringSharedHashHelper(
+        source_, shared_, language_mode_, scope_position_);
   }
 
   uint32_t HashForObject(Object* obj) {
-    FixedArray* pair = FixedArray::cast(obj);
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
-    String* source = String::cast(pair->get(1));
-    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
-        Smi::cast(pair->get(2))->value());
-    return StringSharedHashHelper(source, shared, strict_mode);
+    FixedArray* other_array = FixedArray::cast(obj);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
+    String* source = String::cast(other_array->get(1));
+    int language_unchecked = Smi::cast(other_array->get(2))->value();
+    ASSERT(language_unchecked == CLASSIC_MODE ||
+           language_unchecked == STRICT_MODE ||
+           language_unchecked == EXTENDED_MODE);
+    LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+    int scope_position = Smi::cast(other_array->get(3))->value();
+    return StringSharedHashHelper(
+        source, shared, language_mode, scope_position);
   }
 
   MUST_USE_RESULT MaybeObject* AsObject() {
     Object* obj;
-    { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
+    { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
-    FixedArray* pair = FixedArray::cast(obj);
-    pair->set(0, shared_);
-    pair->set(1, source_);
-    pair->set(2, Smi::FromInt(strict_mode_));
-    return pair;
+    FixedArray* other_array = FixedArray::cast(obj);
+    other_array->set(0, shared_);
+    other_array->set(1, source_);
+    other_array->set(2, Smi::FromInt(language_mode_));
+    other_array->set(3, Smi::FromInt(scope_position_));
+    return other_array;
   }
 
  private:
   String* source_;
   SharedFunctionInfo* shared_;
-  StrictModeFlag strict_mode_;
+  LanguageMode language_mode_;
+  int scope_position_;
 };
 
 
@@ -9572,8 +10396,8 @@
 // Utf8SymbolKey carries a vector of chars as key.
 class Utf8SymbolKey : public HashTableKey {
  public:
-  explicit Utf8SymbolKey(Vector<const char> string, uint32_t seed)
-      : string_(string), hash_field_(0), seed_(seed) { }
+  explicit Utf8SymbolKey(Vector<const char> string)
+      : string_(string), hash_field_(0) { }
 
   bool IsMatch(Object* string) {
     return String::cast(string)->IsEqualTo(string_);
@@ -9584,7 +10408,7 @@
     unibrow::Utf8InputBuffer<> buffer(string_.start(),
                                       static_cast<unsigned>(string_.length()));
     chars_ = buffer.Length();
-    hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
+    hash_field_ = String::ComputeHashField(&buffer, chars_);
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
@@ -9603,18 +10427,17 @@
   Vector<const char> string_;
   uint32_t hash_field_;
   int chars_;  // Caches the number of characters when computing the hash code.
-  uint32_t seed_;
 };
 
 
 template <typename Char>
 class SequentialSymbolKey : public HashTableKey {
  public:
-  explicit SequentialSymbolKey(Vector<const Char> string, uint32_t seed)
-      : string_(string), hash_field_(0), seed_(seed) { }
+  explicit SequentialSymbolKey(Vector<const Char> string)
+      : string_(string), hash_field_(0) { }
 
   uint32_t Hash() {
-    StringHasher hasher(string_.length(), seed_);
+    StringHasher hasher(string_.length());
 
     // Very long strings have a trivial hash that doesn't inspect the
     // string contents.
@@ -9650,15 +10473,14 @@
 
   Vector<const Char> string_;
   uint32_t hash_field_;
-  uint32_t seed_;
 };
 
 
 
 class AsciiSymbolKey : public SequentialSymbolKey<char> {
  public:
-  AsciiSymbolKey(Vector<const char> str, uint32_t seed)
-      : SequentialSymbolKey<char>(str, seed) { }
+  explicit AsciiSymbolKey(Vector<const char> str)
+      : SequentialSymbolKey<char>(str) { }
 
   bool IsMatch(Object* string) {
     return String::cast(string)->IsAsciiEqualTo(string_);
@@ -9675,14 +10497,13 @@
  public:
   explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
                                    int from,
-                                   int length,
-                                   uint32_t seed)
-      : string_(string), from_(from), length_(length), seed_(seed) { }
+                                   int length)
+      : string_(string), from_(from), length_(length) { }
 
   uint32_t Hash() {
     ASSERT(length_ >= 0);
     ASSERT(from_ + length_ <= string_->length());
-    StringHasher hasher(length_, string_->GetHeap()->HashSeed());
+    StringHasher hasher(length_);
 
     // Very long strings have a trivial hash that doesn't inspect the
     // string contents.
@@ -9734,14 +10555,13 @@
   int from_;
   int length_;
   uint32_t hash_field_;
-  uint32_t seed_;
 };
 
 
 class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
  public:
-  explicit TwoByteSymbolKey(Vector<const uc16> str, uint32_t seed)
-      : SequentialSymbolKey<uc16>(str, seed) { }
+  explicit TwoByteSymbolKey(Vector<const uc16> str)
+      : SequentialSymbolKey<uc16>(str) { }
 
   bool IsMatch(Object* string) {
     return String::cast(string)->IsTwoByteEqualTo(string_);
@@ -9857,14 +10677,14 @@
     if (element->IsUndefined()) break;  // Empty entry.
     if (key == element) return entry;
     if (!element->IsSymbol() &&
-        !element->IsNull() &&
+        !element->IsTheHole() &&
         String::cast(element)->Equals(key)) {
       // Replace a non-symbol key by the equivalent symbol for faster further
       // lookups.
       set(index, key);
       return entry;
     }
-    ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
+    ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
     entry = NextProbe(entry, count++, capacity);
   }
   return kNotFound;
@@ -9891,7 +10711,7 @@
     uint32_t from_index = EntryToIndex(i);
     Object* k = get(from_index);
     if (IsKey(k)) {
-      uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+      uint32_t hash = Shape::HashForObject(key, k);
       uint32_t insertion_index =
           EntryToIndex(new_table->FindInsertionEntry(hash));
       for (int j = 0; j < Shape::kEntrySize; j++) {
@@ -9968,7 +10788,7 @@
   // EnsureCapacity will guarantee the hash table is never full.
   while (true) {
     Object* element = KeyAt(entry);
-    if (element->IsUndefined() || element->IsNull()) break;
+    if (element->IsUndefined() || element->IsTheHole()) break;
     entry = NextProbe(entry, count++, capacity);
   }
   return entry;
@@ -9983,50 +10803,44 @@
 
 template class HashTable<MapCacheShape, HashTableKey*>;
 
-template class HashTable<ObjectHashTableShape, JSObject*>;
+template class HashTable<ObjectHashTableShape<1>, Object*>;
+
+template class HashTable<ObjectHashTableShape<2>, Object*>;
 
 template class Dictionary<StringDictionaryShape, String*>;
 
-template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
+template class Dictionary<NumberDictionaryShape, uint32_t>;
 
-template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
-    Allocate(int at_least_space_for);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
-    Allocate(int at_least_space_for);
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Allocate(
+    int);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate(
     int);
 
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AtPut(
     uint32_t, Object*);
 
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
-    AtPut(uint32_t, Object*);
-
-template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
-    SlowReverseLookup(Object* value);
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup(
+    Object*);
 
 template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
     Object*);
 
-template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
+template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
     FixedArray*,
     PropertyAttributes,
-    Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode);
+    Dictionary<NumberDictionaryShape, uint32_t>::SortMode);
 
 template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
     int, JSObject::DeleteMode);
 
-template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
-    DeleteProperty(int, JSObject::DeleteMode);
+template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
+    int, JSObject::DeleteMode);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
     String*);
 
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink(
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Shrink(
     uint32_t);
 
 template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
@@ -10045,41 +10859,32 @@
 Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
 
 template int
-Dictionary<SeededNumberDictionaryShape, uint32_t>::
-    NumberOfElementsFilterAttributes(PropertyAttributes);
+Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes(
+    PropertyAttributes);
 
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Add(
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Add(
     uint32_t, Object*, PropertyDetails);
 
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::Add(
-    uint32_t, Object*, PropertyDetails);
-
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
-    EnsureCapacity(int, uint32_t);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::
     EnsureCapacity(int, uint32_t);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::
     EnsureCapacity(int, String*);
 
-template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
-    AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
-
-template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
-    AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry(
+    uint32_t, Object*, PropertyDetails, uint32_t);
 
 template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry(
     String*, Object*, PropertyDetails, uint32_t);
 
 template
-int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements();
+int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
 
 template
 int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
 
 template
-int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
 
 
 // Collates undefined and nonexistent elements below limit from position
@@ -10089,7 +10894,7 @@
   // Must stay in dictionary mode, either because of requires_slow_elements,
   // or because we are not going to sort (and therefore compact) all of the
   // elements.
-  SeededNumberDictionary* dict = element_dictionary();
+  NumberDictionary* dict = element_dictionary();
   HeapNumber* result_double = NULL;
   if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
     // Allocate space for result before we start mutating the object.
@@ -10102,10 +10907,10 @@
 
   Object* obj;
   { MaybeObject* maybe_obj =
-        SeededNumberDictionary::Allocate(dict->NumberOfElements());
+        NumberDictionary::Allocate(dict->NumberOfElements());
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-  SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
+  NumberDictionary* new_dict = NumberDictionary::cast(obj);
 
   AssertNoAllocation no_alloc;
 
@@ -10184,14 +10989,12 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
-  ASSERT(!HasExternalArrayElements());
-
   Heap* heap = GetHeap();
 
   if (HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
     // Ordering is irrelevant, since we are going to sort anyway.
-    SeededNumberDictionary* dict = element_dictionary();
+    NumberDictionary* dict = element_dictionary();
     if (IsJSArray() || dict->requires_slow_elements() ||
         dict->max_number_key() >= limit) {
       return PrepareSlowElementsForSort(limit);
@@ -10199,7 +11002,7 @@
     // Convert to fast elements.
 
     Object* obj;
-    { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+    { MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     Map* new_map = Map::cast(obj);
@@ -10215,13 +11018,16 @@
 
     set_map(new_map);
     set_elements(fast_elements);
+  } else if (HasExternalArrayElements()) {
+    // External arrays cannot have holes or undefined elements.
+    return Smi::FromInt(ExternalArray::cast(elements())->length());
   } else if (!HasFastDoubleElements()) {
     Object* obj;
     { MaybeObject* maybe_obj = EnsureWritableFastElements();
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
   }
-  ASSERT(HasFastElements() || HasFastDoubleElements());
+  ASSERT(HasFastTypeElements() || HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
   // start, and return the number of non-hole, non-undefined values.
@@ -10490,6 +11296,16 @@
 }
 
 
+Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
+    Handle<GlobalObject> global,
+    Handle<String> name) {
+  Isolate* isolate = global->GetIsolate();
+  CALL_HEAP_FUNCTION(isolate,
+                     global->EnsurePropertyCell(*name),
+                     JSGlobalPropertyCell);
+}
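
The new handlified EnsurePropertyCell overload follows the standard pattern of this codebase: a static Handle<>-returning wrapper that routes the raw MaybeObject* call through CALL_HEAP_FUNCTION, which retries the allocation after a GC rather than propagating a retry-after-gc failure. A sketch of just that control flow, with stand-in types and a hypothetical allocator:

    #include <cstdio>

    struct JSGlobalPropertyCell {};
    template <typename T> struct Handle { T* raw; };

    // Hypothetical raw call: fails on the first attempt to model an
    // allocation that needs a GC before it can succeed.
    static JSGlobalPropertyCell* TryEnsureCell(bool* failed) {
      static int attempts = 0;
      static JSGlobalPropertyCell cell;
      *failed = (attempts++ == 0);
      return *failed ? nullptr : &cell;
    }

    static void CollectGarbage() { std::puts("gc"); }

    // Rough shape of what CALL_HEAP_FUNCTION expands to: attempt, GC on
    // failure, retry, and only then give up.
    static Handle<JSGlobalPropertyCell> EnsurePropertyCell() {
      for (int i = 0; i < 2; ++i) {
        bool failed = false;
        JSGlobalPropertyCell* cell = TryEnsureCell(&failed);
        if (!failed) return Handle<JSGlobalPropertyCell>{cell};
        CollectGarbage();
      }
      return Handle<JSGlobalPropertyCell>{nullptr};  // real code aborts here
    }

    int main() {
      Handle<JSGlobalPropertyCell> cell = EnsurePropertyCell();
      std::printf("cell %s\n", cell.raw ? "allocated" : "missing");
      return 0;
    }
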
+
+
 MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
   ASSERT(!HasFastProperties());
   int entry = property_dictionary()->FindEntry(name);
@@ -10530,12 +11346,10 @@
 // algorithm.
 class TwoCharHashTableKey : public HashTableKey {
  public:
-  TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed)
+  TwoCharHashTableKey(uint32_t c1, uint32_t c2)
     : c1_(c1), c2_(c2) {
     // Char 1.
-    uint32_t hash = seed;
-    hash += c1;
-    hash += hash << 10;
+    uint32_t hash = c1 + (c1 << 10);
     hash ^= hash >> 6;
     // Char 2.
     hash += c2;
@@ -10545,9 +11359,9 @@
     hash += hash << 3;
     hash ^= hash >> 11;
     hash += hash << 15;
-    if ((hash & String::kHashBitMask) == 0) hash = String::kZeroHash;
+    if (hash == 0) hash = 27;
 #ifdef DEBUG
-    StringHasher hasher(2, seed);
+    StringHasher hasher(2);
     hasher.AddCharacter(c1);
     hasher.AddCharacter(c2);
     // If this assert fails then we failed to reproduce the two-character
@@ -10604,7 +11418,7 @@
 bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
                                                uint32_t c2,
                                                String** symbol) {
-  TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
+  TwoCharHashTableKey key(c1, c2);
   int entry = FindEntry(&key);
   if (entry == kNotFound) {
     return false;
@@ -10617,16 +11431,15 @@
 }
 
 
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str,
-                                       Object** s) {
-  Utf8SymbolKey key(str, GetHeap()->HashSeed());
+MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
+  Utf8SymbolKey key(str);
   return LookupKey(&key, s);
 }
 
 
 MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
                                             Object** s) {
-  AsciiSymbolKey key(str, GetHeap()->HashSeed());
+  AsciiSymbolKey key(str);
   return LookupKey(&key, s);
 }
 
@@ -10635,14 +11448,14 @@
                                                      int from,
                                                      int length,
                                                      Object** s) {
-  SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
+  SubStringAsciiSymbolKey key(str, from, length);
   return LookupKey(&key, s);
 }
 
 
 MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
                                               Object** s) {
-  TwoByteSymbolKey key(str, GetHeap()->HashSeed());
+  TwoByteSymbolKey key(str);
   return LookupKey(&key, s);
 }
 
@@ -10691,8 +11504,12 @@
 
 Object* CompilationCacheTable::LookupEval(String* src,
                                           Context* context,
-                                          StrictModeFlag strict_mode) {
-  StringSharedKey key(src, context->closure()->shared(), strict_mode);
+                                          LanguageMode language_mode,
+                                          int scope_position) {
+  StringSharedKey key(src,
+                      context->closure()->shared(),
+                      language_mode,
+                      scope_position);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
@@ -10727,10 +11544,12 @@
 
 MaybeObject* CompilationCacheTable::PutEval(String* src,
                                             Context* context,
-                                            SharedFunctionInfo* value) {
+                                            SharedFunctionInfo* value,
+                                            int scope_position) {
   StringSharedKey key(src,
                       context->closure()->shared(),
-                      value->strict_mode() ? kStrictMode : kNonStrictMode);
+                      value->language_mode(),
+                      scope_position);
   Object* obj;
   { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -10774,13 +11593,13 @@
 
 
 void CompilationCacheTable::Remove(Object* value) {
-  Object* null_value = GetHeap()->null_value();
+  Object* the_hole_value = GetHeap()->the_hole_value();
   for (int entry = 0, size = Capacity(); entry < size; entry++) {
     int entry_index = EntryToIndex(entry);
     int value_index = entry_index + 1;
     if (get(value_index) == value) {
-      fast_set(this, entry_index, null_value);
-      fast_set(this, value_index, null_value);
+      NoWriteBarrierSet(this, entry_index, the_hole_value);
+      NoWriteBarrierSet(this, value_index, the_hole_value);
       ElementRemoved();
     }
   }
@@ -10933,30 +11752,6 @@
 }
 
 
-void SeededNumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
-  // Do nothing if the interval [from, to) is empty.
-  if (from >= to) return;
-
-  Heap* heap = GetHeap();
-  int removed_entries = 0;
-  Object* sentinel = heap->null_value();
-  int capacity = Capacity();
-  for (int i = 0; i < capacity; i++) {
-    Object* key = KeyAt(i);
-    if (key->IsNumber()) {
-      uint32_t number = static_cast<uint32_t>(key->Number());
-      if (from <= number && number < to) {
-        SetEntry(i, sentinel, sentinel);
-        removed_entries++;
-      }
-    }
-  }
-
-  // Update the number of elements.
-  ElementsRemoved(removed_entries);
-}
-
-
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
                                                JSReceiver::DeleteMode mode) {
@@ -10966,7 +11761,7 @@
   if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
     return heap->false_value();
   }
-  SetEntry(entry, heap->null_value(), heap->null_value());
+  SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
   HashTable<Shape, Key>::ElementRemoved();
   return heap->true_value();
 }
@@ -10999,9 +11794,8 @@
     if (!maybe_k->ToObject(&k)) return maybe_k;
   }
   PropertyDetails details = PropertyDetails(NONE, NORMAL);
-
-  return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
-      Dictionary<Shape, Key>::Hash(key));
+  return Dictionary<Shape, Key>::cast(obj)->
+      AddEntry(key, value, details, Shape::Hash(key));
 }
 
 
@@ -11016,9 +11810,8 @@
   { MaybeObject* maybe_obj = EnsureCapacity(1, key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-
-  return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
-      Dictionary<Shape, Key>::Hash(key));
+  return Dictionary<Shape, Key>::cast(obj)->
+      AddEntry(key, value, details, Shape::Hash(key));
 }
 
 
@@ -11051,7 +11844,7 @@
 }
 
 
-void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
+void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
   // If the dictionary requires slow elements, an element has already
   // been added at a high index.
   if (requires_slow_elements()) return;
@@ -11070,44 +11863,31 @@
 }
 
 
-MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
-                                                    Object* value,
-                                                    PropertyDetails details) {
+MaybeObject* NumberDictionary::AddNumberEntry(uint32_t key,
+                                              Object* value,
+                                              PropertyDetails details) {
   UpdateMaxNumberKey(key);
   SLOW_ASSERT(this->FindEntry(key) == kNotFound);
   return Add(key, value, details);
 }
 
 
-MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
-                                                      Object* value) {
-  SLOW_ASSERT(this->FindEntry(key) == kNotFound);
-  return Add(key, value, PropertyDetails(NONE, NORMAL));
-}
-
-
-MaybeObject* SeededNumberDictionary::AtNumberPut(uint32_t key, Object* value) {
+MaybeObject* NumberDictionary::AtNumberPut(uint32_t key, Object* value) {
   UpdateMaxNumberKey(key);
   return AtPut(key, value);
 }
 
 
-MaybeObject* UnseededNumberDictionary::AtNumberPut(uint32_t key,
-                                                   Object* value) {
-  return AtPut(key, value);
-}
-
-
-MaybeObject* SeededNumberDictionary::Set(uint32_t key,
-                                         Object* value,
-                                         PropertyDetails details) {
+MaybeObject* NumberDictionary::Set(uint32_t key,
+                                   Object* value,
+                                   PropertyDetails details) {
   int entry = FindEntry(key);
   if (entry == kNotFound) return AddNumberEntry(key, value, details);
   // Preserve enumeration index.
   details = PropertyDetails(details.attributes(),
                             details.type(),
                             DetailsAt(entry).index());
-  MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
+  MaybeObject* maybe_object_key = NumberDictionaryShape::AsObject(key);
   Object* object_key;
   if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
   SetEntry(entry, object_key, value, details);
@@ -11115,18 +11895,6 @@
 }
 
 
-MaybeObject* UnseededNumberDictionary::Set(uint32_t key,
-                                           Object* value) {
-  int entry = FindEntry(key);
-  if (entry == kNotFound) return AddNumberEntry(key, value);
-  MaybeObject* maybe_object_key = UnseededNumberDictionaryShape::AsObject(key);
-  Object* object_key;
-  if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
-  SetEntry(entry, object_key, value);
-  return this;
-}
-
-
 
 template<typename Shape, typename Key>
 int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
@@ -11279,14 +12047,15 @@
   }
 
   // Allocate the instance descriptor.
-  Object* descriptors_unchecked;
-  { MaybeObject* maybe_descriptors_unchecked =
+  DescriptorArray* descriptors;
+  { MaybeObject* maybe_descriptors =
         DescriptorArray::Allocate(instance_descriptor_length);
-    if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
-      return maybe_descriptors_unchecked;
+    if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
+      return maybe_descriptors;
     }
   }
-  DescriptorArray* descriptors = DescriptorArray::cast(descriptors_unchecked);
+
+  DescriptorArray::WhitenessWitness witness(descriptors);
 
   int inobject_props = obj->map()->inobject_properties();
   int number_of_allocated_fields =
@@ -11324,7 +12093,7 @@
                                      JSFunction::cast(value),
                                      details.attributes(),
                                      details.index());
-        descriptors->Set(next_descriptor++, &d);
+        descriptors->Set(next_descriptor++, &d, witness);
       } else if (type == NORMAL) {
         if (current_offset < inobject_props) {
           obj->InObjectPropertyAtPut(current_offset,
@@ -11338,13 +12107,13 @@
                           current_offset++,
                           details.attributes(),
                           details.index());
-        descriptors->Set(next_descriptor++, &d);
+        descriptors->Set(next_descriptor++, &d, witness);
       } else if (type == CALLBACKS) {
         CallbacksDescriptor d(String::cast(key),
                               value,
                               details.attributes(),
                               details.index());
-        descriptors->Set(next_descriptor++, &d);
+        descriptors->Set(next_descriptor++, &d, witness);
       } else {
         UNREACHABLE();
       }
@@ -11352,7 +12121,7 @@
   }
   ASSERT(current_offset == number_of_fields);
 
-  descriptors->Sort();
+  descriptors->Sort(witness);
   // Allocate new map.
   Object* new_map;
   { MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
@@ -11375,20 +12144,84 @@
 }
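
The WhitenessWitness threaded through Set() and Sort() above is a small capability token: constructing one asserts the DescriptorArray was just allocated (still unscanned, "white", to the incremental marker), which lets every store it accompanies skip the write barrier. A sketch of the idiom with stand-in types:

    #include <cassert>
    #include <vector>

    class DescriptorArray {
     public:
      class WhitenessWitness {
       public:
        explicit WhitenessWitness(DescriptorArray* array) : array_(array) {}
        DescriptorArray* array() const { return array_; }
       private:
        DescriptorArray* array_;
      };

      explicit DescriptorArray(int length) : slots_(length, 0) {}

      // Barrier-free store: the witness vouches that this array is newly
      // allocated, so the incremental marker cannot have visited it yet.
      void Set(int index, int value, const WhitenessWitness& witness) {
        assert(witness.array() == this);
        slots_[index] = value;
      }

     private:
      std::vector<int> slots_;
    };

    int main() {
      DescriptorArray descriptors(2);
      DescriptorArray::WhitenessWitness witness(&descriptors);
      descriptors.Set(0, 7, witness);
      descriptors.Set(1, 9, witness);
      return 0;
    }
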
 
 
-Object* ObjectHashTable::Lookup(JSObject* key) {
+bool ObjectHashSet::Contains(Object* key) {
+  ASSERT(IsKey(key));
+
   // If the object does not have an identity hash, it was never used as a key.
-  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
-  if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
+  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
+  }
+  return (FindEntry(key) != kNotFound);
+}
+
+
+MaybeObject* ObjectHashSet::Add(Object* key) {
+  ASSERT(IsKey(key));
+
+  // Make sure the key object has an identity hash code.
+  int hash;
+  { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
+    if (maybe_hash->IsFailure()) return maybe_hash;
+    hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
+  }
+  int entry = FindEntry(key);
+
+  // Check whether key is already present.
+  if (entry != kNotFound) return this;
+
+  // Check whether the hash set should be extended and add entry.
+  Object* obj;
+  { MaybeObject* maybe_obj = EnsureCapacity(1, key);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  ObjectHashSet* table = ObjectHashSet::cast(obj);
+  entry = table->FindInsertionEntry(hash);
+  table->set(EntryToIndex(entry), key);
+  table->ElementAdded();
+  return table;
+}
+
+
+MaybeObject* ObjectHashSet::Remove(Object* key) {
+  ASSERT(IsKey(key));
+
+  // If the object does not have an identity hash, it was never used as a key.
+  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
+  }
+  int entry = FindEntry(key);
+
+  // Check whether key is actually present.
+  if (entry == kNotFound) return this;
+
+  // Remove entry and try to shrink this hash set.
+  set_the_hole(EntryToIndex(entry));
+  ElementRemoved();
+  return Shrink(key);
+}
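
ObjectHashSet (backing the new Harmony collections) keys membership on the object's lazily created identity hash, which produces the asymmetry visible above: Contains and Remove ask for the hash with OMIT_CREATION, since an object with no hash can never have been inserted, while Add forces one with ALLOW_CREATION. A standalone sketch of that flag protocol (the map of hashes is a stand-in, not V8's representation):

    #include <cassert>
    #include <unordered_map>
    #include <unordered_set>

    enum CreationFlag { ALLOW_CREATION, OMIT_CREATION };

    class IdentityHashes {
     public:
      // Returns -1 when the object has no hash and creation was omitted.
      int GetHash(const void* object, CreationFlag flag) {
        auto it = hashes_.find(object);
        if (it != hashes_.end()) return it->second;
        if (flag == OMIT_CREATION) return -1;
        return hashes_[object] = next_++;
      }
     private:
      std::unordered_map<const void*, int> hashes_;
      int next_ = 1;
    };

    int main() {
      IdentityHashes identity;
      std::unordered_set<int> members;  // stores identity hashes
      int object = 0;

      // Contains with OMIT_CREATION: no hash yet, so not a member.
      assert(identity.GetHash(&object, OMIT_CREATION) == -1);

      // Add with ALLOW_CREATION: mint the hash, then insert it.
      members.insert(identity.GetHash(&object, ALLOW_CREATION));
      assert(members.count(identity.GetHash(&object, OMIT_CREATION)) == 1);
      return 0;
    }
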
+
+
+Object* ObjectHashTable::Lookup(Object* key) {
+  ASSERT(IsKey(key));
+
+  // If the object does not have an identity hash, it was never used as a key.
+  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
+    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
+      return GetHeap()->undefined_value();
+    }
+  }
   int entry = FindEntry(key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
 
-MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
+MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
+  ASSERT(IsKey(key));
+
   // Make sure the key object has an identity hash code.
   int hash;
-  { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
+  { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
     if (maybe_hash->IsFailure()) return maybe_hash;
     hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
   }
@@ -11418,16 +12251,16 @@
 }
 
 
-void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
+void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
   set(EntryToIndex(entry), key);
   set(EntryToIndex(entry) + 1, value);
   ElementAdded();
 }
 
 
-void ObjectHashTable::RemoveEntry(int entry, Heap* heap) {
-  set_null(heap, EntryToIndex(entry));
-  set_null(heap, EntryToIndex(entry) + 1);
+void ObjectHashTable::RemoveEntry(int entry) {
+  set_the_hole(EntryToIndex(entry));
+  set_the_hole(EntryToIndex(entry) + 1);
   ElementRemoved();
 }
 
@@ -11682,7 +12515,7 @@
   // Multiple break points.
   return FixedArray::cast(break_point_objects())->length();
 }
-#endif
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 } }  // namespace v8::internal
diff --git a/src/objects.h b/src/objects.h
index 1245ed0..49aa2f7 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -31,6 +31,7 @@
 #include "allocation.h"
 #include "builtins.h"
 #include "list.h"
+#include "property-details.h"
 #include "smart-array-pointer.h"
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
@@ -38,6 +39,8 @@
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/constants-mips.h"
 #endif
+#include "v8checks.h"
+
 
 //
 // Most object types in the V8 JavaScript heap are described in this file.
@@ -51,6 +54,8 @@
 //       - JSReceiver  (suitable for property access)
 //         - JSObject
 //           - JSArray
+//           - JSSet
+//           - JSMap
 //           - JSWeakMap
 //           - JSRegExp
 //           - JSFunction
@@ -74,7 +79,7 @@
 //             - MapCache
 //           - Context
 //           - JSFunctionResultCache
-//           - SerializedScopeInfo
+//           - ScopeInfo
 //         - FixedDoubleArray
 //         - ExternalArray
 //           - ExternalPixelArray
@@ -120,24 +125,17 @@
 //  HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
 //  Failure:    [30 bit signed int] 11
 
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
-  NONE              = v8::None,
-  READ_ONLY         = v8::ReadOnly,
-  DONT_ENUM         = v8::DontEnum,
-  DONT_DELETE       = v8::DontDelete,
-  ABSENT            = 16  // Used in runtime to indicate a property is absent.
-  // ABSENT can never be stored in or returned from a descriptor's attributes
-  // bitfield.  It is only used as a return value meaning the attributes of
-  // a non-existent property.
-};
-
 namespace v8 {
 namespace internal {
 
 enum ElementsKind {
-  // The "fast" kind for tagged values. Must be first to make it possible
-  // to efficiently check maps if they have fast elements.
+  // The "fast" kind for elements that only contain SMI values. Must be first
+  // to make it possible to efficiently check maps for this kind.
+  FAST_SMI_ONLY_ELEMENTS,
+
+  // The "fast" kind for tagged values. Must be second to make it possible to
+  // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+  // together at once.
   FAST_ELEMENTS,
 
   // The "fast" kind for unwrapped, non-tagged double values.
@@ -160,101 +158,16 @@
   // Derived constants from ElementsKind
   FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
   LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
-  FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+  FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
   LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
 };
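
The ordering constraints spelled out in the comments above are load-bearing: with the two tagged fast kinds first, "has fast-type elements" compiles down to a single unsigned comparison on the kind. A tiny sketch of the resulting check (kind values mirror the enum; the predicate name follows the HasFastTypeElements assert seen earlier in this diff):

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,  // 0: must be first
      FAST_ELEMENTS,           // 1: must be second
      FAST_DOUBLE_ELEMENTS,    // 2
      DICTIONARY_ELEMENTS      // 3 (remaining kinds omitted here)
    };

    static bool HasFastTypeElements(ElementsKind kind) {
      return kind <= FAST_ELEMENTS;  // covers both tagged fast kinds at once
    }

    int main() {
      assert(HasFastTypeElements(FAST_SMI_ONLY_ELEMENTS));
      assert(HasFastTypeElements(FAST_ELEMENTS));
      assert(!HasFastTypeElements(DICTIONARY_ELEMENTS));
      return 0;
    }
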
 
-static const int kElementsKindCount =
-    LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
 
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  int index = 0) {
-    ASSERT(type != ELEMENTS_TRANSITION);
-    ASSERT(TypeField::is_valid(type));
-    ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(index));
+void PrintElementsKind(FILE* out, ElementsKind kind);
 
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | StorageField::encode(index);
-
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
-    ASSERT(index == this->index());
-  }
-
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  ElementsKind elements_kind) {
-    ASSERT(type == ELEMENTS_TRANSITION);
-    ASSERT(TypeField::is_valid(type));
-    ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
-
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | StorageField::encode(static_cast<int>(elements_kind));
-
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
-    ASSERT(elements_kind == this->elements_kind());
-  }
-
-  // Conversion for storing details as Object*.
-  explicit inline PropertyDetails(Smi* smi);
-  inline Smi* AsSmi();
-
-  PropertyType type() { return TypeField::decode(value_); }
-
-  bool IsTransition() {
-    PropertyType t = type();
-    ASSERT(t != INTERCEPTOR);
-    return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
-        t == ELEMENTS_TRANSITION;
-  }
-
-  bool IsProperty() {
-    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
-  }
-
-  PropertyAttributes attributes() { return AttributesField::decode(value_); }
-
-  int index() { return StorageField::decode(value_); }
-
-  ElementsKind elements_kind() {
-    ASSERT(type() == ELEMENTS_TRANSITION);
-    return static_cast<ElementsKind>(StorageField::decode(value_));
-  }
-
-  inline PropertyDetails AsDeleted();
-
-  static bool IsValidIndex(int index) {
-    return StorageField::is_valid(index);
-  }
-
-  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
-  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
-  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
-  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
-
-  // Bit fields in value_ (type, shift, size). Must be public so the
-  // constants can be embedded in generated code.
-  class TypeField:       public BitField<PropertyType,       0, 4> {};
-  class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
-  class DeletedField:    public BitField<uint32_t,           7, 1> {};
-  class StorageField:    public BitField<uint32_t,           8, 32-8> {};
-
-  static const int kInitialIndex = 1;
-
- private:
-  uint32_t value_;
-};
-
+inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+                                                ElementsKind to_kind);
 
 // Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
 enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
@@ -276,8 +189,15 @@
 };
 
 
+// Indicates whether a get method should implicitly create the object looked up.
+enum CreationFlag {
+  ALLOW_CREATION,
+  OMIT_CREATION
+};
+
+
 // Instance size sentinel for objects of variable size.
-static const int kVariableSizeSentinel = 0;
+const int kVariableSizeSentinel = 0;
 
 
 // All Maps have a field instance_type containing a InstanceType.
@@ -311,6 +231,9 @@
   V(EXTERNAL_SYMBOL_TYPE)                                                      \
   V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_SYMBOL_TYPE)                                                \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE)                                                \
+  V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE)                                \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE)                                          \
   V(STRING_TYPE)                                                               \
   V(ASCII_STRING_TYPE)                                                         \
   V(CONS_STRING_TYPE)                                                          \
@@ -319,6 +242,9 @@
   V(EXTERNAL_STRING_TYPE)                                                      \
   V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_STRING_TYPE)                                                \
+  V(SHORT_EXTERNAL_STRING_TYPE)                                                \
+  V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE)                                          \
   V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE)                                        \
                                                                                \
   V(MAP_TYPE)                                                                  \
@@ -329,6 +255,7 @@
   V(HEAP_NUMBER_TYPE)                                                          \
   V(FOREIGN_TYPE)                                                              \
   V(BYTE_ARRAY_TYPE)                                                           \
+  V(FREE_SPACE_TYPE)                                                           \
   /* Note: the order of these external array */                                \
   /* types is relied upon in */                                                \
   /* Object::IsExternalArray(). */                                             \
@@ -418,6 +345,18 @@
     ExternalAsciiString::kSize,                                                \
     external_ascii_symbol,                                                     \
     ExternalAsciiSymbol)                                                       \
+  V(SHORT_EXTERNAL_SYMBOL_TYPE,                                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_symbol,                                                     \
+    ShortExternalSymbol)                                                       \
+  V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE,                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_symbol_with_ascii_data,                                     \
+    ShortExternalSymbolWithAsciiData)                                          \
+  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE,                                          \
+    ExternalAsciiString::kShortSize,                                           \
+    short_external_ascii_symbol,                                               \
+    ShortExternalAsciiSymbol)                                                  \
   V(STRING_TYPE,                                                               \
     kVariableSizeSentinel,                                                     \
     string,                                                                    \
@@ -453,7 +392,19 @@
   V(EXTERNAL_ASCII_STRING_TYPE,                                                \
     ExternalAsciiString::kSize,                                                \
     external_ascii_string,                                                     \
-    ExternalAsciiString)
+    ExternalAsciiString)                                                       \
+  V(SHORT_EXTERNAL_STRING_TYPE,                                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_string,                                                     \
+    ShortExternalString)                                                       \
+  V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE,                                \
+    ExternalTwoByteString::kShortSize,                                         \
+    short_external_string_with_ascii_data,                                     \
+    ShortExternalStringWithAsciiData)                                          \
+  V(SHORT_EXTERNAL_ASCII_STRING_TYPE,                                          \
+    ExternalAsciiString::kShortSize,                                           \
+    short_external_ascii_string,                                               \
+    ShortExternalAsciiString)
 
 // A struct is a simple object with a set of object-valued fields.  Including an
 // object type in this causes the compiler to generate most of the boilerplate
@@ -537,6 +488,11 @@
 const uint32_t kAsciiDataHintMask = 0x08;
 const uint32_t kAsciiDataHintTag = 0x08;
 
+// If bit 7 is clear and string representation indicates an external string,
+// then bit 4 indicates whether the data pointer is cached.
+const uint32_t kShortExternalStringMask = 0x10;
+const uint32_t kShortExternalStringTag = 0x10;
+
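
kShortExternalStringMask adds another orthogonal bit to the string instance type word, distinguishing external strings whose data pointer is not cached. A sketch of how such predicates are typically composed from the masks (the representation constants are the usual values for this era of the code, but are assumptions here, as is the predicate name):

    #include <cassert>
    #include <cstdint>

    const uint32_t kExternalStringTag = 0x2;         // assumed representation
    const uint32_t kStringRepresentationMask = 0x3;  // bits for this era
    const uint32_t kShortExternalStringMask = 0x10;  // from the hunk above
    const uint32_t kShortExternalStringTag = 0x10;

    // Hypothetical predicate: external representation plus the short bit.
    static bool IsShortExternal(uint32_t instance_type) {
      return (instance_type & kStringRepresentationMask) ==
                 kExternalStringTag &&
             (instance_type & kShortExternalStringMask) ==
                 kShortExternalStringTag;
    }

    int main() {
      assert(IsShortExternal(kExternalStringTag | kShortExternalStringTag));
      assert(!IsShortExternal(kExternalStringTag));  // cached-pointer variant
      return 0;
    }
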
 
 // A ConsString with an empty string as the right side is a candidate
 // for being shortcut by the garbage collector unless it is a
@@ -556,6 +512,13 @@
   ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
   CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
   CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
+  SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
+                               kExternalStringTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
+      kTwoByteStringTag | kSymbolTag | kExternalStringTag |
+      kAsciiDataHintTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
+                                     kSymbolTag | kShortExternalStringTag,
   EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
   EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
@@ -567,6 +530,13 @@
   CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
   SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
   SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
+  SHORT_EXTERNAL_STRING_TYPE =
+      kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+      kTwoByteStringTag | kExternalStringTag |
+      kAsciiDataHintTag | kShortExternalStringTag,
+  SHORT_EXTERNAL_ASCII_STRING_TYPE =
+      kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
   EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
   EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
@@ -585,6 +555,7 @@
   HEAP_NUMBER_TYPE,
   FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
+  FREE_SPACE_TYPE,
   EXTERNAL_BYTE_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
   EXTERNAL_SHORT_ARRAY_TYPE,
@@ -621,24 +592,32 @@
 
   JS_MESSAGE_OBJECT_TYPE,
 
-  JS_VALUE_TYPE,  // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
+  // All the following types are subtypes of JSReceiver, which corresponds to
+  // objects in the JS sense. The first and the last type in this range are
+  // the two forms of function. This organization enables using the same
+  // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
+  // NONCALLABLE_JS_OBJECT range.
+  JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
+  JS_PROXY_TYPE,  // LAST_JS_PROXY_TYPE
+
+  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GLOBAL_OBJECT_TYPE,
   JS_BUILTINS_OBJECT_TYPE,
   JS_GLOBAL_PROXY_TYPE,
   JS_ARRAY_TYPE,
-  JS_PROXY_TYPE,
+  JS_SET_TYPE,
+  JS_MAP_TYPE,
   JS_WEAK_MAP_TYPE,
 
-  JS_REGEXP_TYPE,  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
+  JS_REGEXP_TYPE,
 
-  JS_FUNCTION_TYPE,  // FIRST_CALLABLE_SPEC_OBJECT_TYPE
-  JS_FUNCTION_PROXY_TYPE,  // LAST_CALLABLE_SPEC_OBJECT_TYPE
+  JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
   // Pseudo-types
   FIRST_TYPE = 0x0,
-  LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
+  LAST_TYPE = JS_FUNCTION_TYPE,
   INVALID_TYPE = FIRST_TYPE - 1,
   FIRST_NONSTRING_TYPE = MAP_TYPE,
   // Boundaries for testing for an external array.
@@ -651,21 +630,27 @@
   // are not continuous in this enum! The enum ranges instead reflect the
   // external class names, where proxies are treated as either ordinary objects
   // or functions.
-  FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+  FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
   LAST_JS_RECEIVER_TYPE = LAST_TYPE,
-  // Boundaries for testing the types for which typeof is "object".
-  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
-  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
-  // Boundaries for testing the types for which typeof is "function".
-  FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
-  LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+  // Boundaries for testing the types represented as JSObject
+  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+  LAST_JS_OBJECT_TYPE = LAST_TYPE,
+  // Boundaries for testing the types represented as JSProxy
+  FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
+  LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
   // Boundaries for testing whether the type is a JavaScript object.
-  FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
-  LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
+  FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
+  LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
+  // Boundaries for testing the types for which typeof is "object".
+  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
+  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
+  // Note that the types for which typeof is "function" are not continuous.
+  // Define this so that we can put assertions on discrete checks.
+  NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
 };
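
The payoff of the reshuffled enum is that every documented range is now contiguous and ends at LAST_TYPE, so receiver and object checks reduce to one comparison each. A miniature of the new layout and the checks it enables (only a slice of the real enum, with the same boundary comments):

    #include <cassert>

    enum InstanceType {
      HEAP_NUMBER_TYPE,        // non-receiver types come first
      BYTE_ARRAY_TYPE,
      JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE
      JS_PROXY_TYPE,
      JS_VALUE_TYPE,           // FIRST_JS_OBJECT_TYPE
      JS_OBJECT_TYPE,
      JS_ARRAY_TYPE,
      JS_FUNCTION_TYPE,        // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
      FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
      FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
      LAST_TYPE = JS_FUNCTION_TYPE
    };

    // Both ranges end at LAST_TYPE, so a lower-bound check suffices.
    static bool IsJSReceiver(InstanceType type) {
      return type >= FIRST_JS_RECEIVER_TYPE;
    }
    static bool IsJSObject(InstanceType type) {
      return type >= FIRST_JS_OBJECT_TYPE;
    }

    int main() {
      assert(!IsJSReceiver(HEAP_NUMBER_TYPE));
      assert(IsJSReceiver(JS_PROXY_TYPE));  // proxies are receivers...
      assert(!IsJSObject(JS_PROXY_TYPE));   // ...but not JSObjects
      assert(IsJSObject(JS_FUNCTION_TYPE));
      return 0;
    }
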
 
-static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
-    FIRST_EXTERNAL_ARRAY_TYPE + 1;
+const int kExternalArrayTypeCount =
+    LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
 
 STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
@@ -697,6 +682,7 @@
 class FixedArrayBase;
 class ObjectVisitor;
 class StringStream;
+class Failure;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -710,7 +696,6 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
-class Failure;
 
 class MaybeObject BASE_EMBEDDED {
  public:
@@ -748,7 +733,7 @@
   // Prints this object with details.
   inline void Print() {
     Print(stdout);
-  };
+  }
   inline void PrintLn() {
     PrintLn(stdout);
   }
@@ -791,6 +776,7 @@
   V(ExternalDoubleArray)                       \
   V(ExternalPixelArray)                        \
   V(ByteArray)                                 \
+  V(FreeSpace)                                 \
   V(JSReceiver)                                \
   V(JSObject)                                  \
   V(JSContextExtensionObject)                  \
@@ -802,7 +788,7 @@
   V(FixedDoubleArray)                          \
   V(Context)                                   \
   V(GlobalContext)                             \
-  V(SerializedScopeInfo)                       \
+  V(ScopeInfo)                                 \
   V(JSFunction)                                \
   V(Code)                                      \
   V(Oddball)                                   \
@@ -815,6 +801,8 @@
   V(JSArray)                                   \
   V(JSProxy)                                   \
   V(JSFunctionProxy)                           \
+  V(JSSet)                                     \
+  V(JSMap)                                     \
   V(JSWeakMap)                                 \
   V(JSRegExp)                                  \
   V(HashTable)                                 \
@@ -835,6 +823,9 @@
   V(AccessCheckNeeded)                         \
   V(JSGlobalPropertyCell)                      \
 
+
+class JSReceiver;
+
 // Object is the abstract superclass for all classes in the
 // object hierarchy.
 // Object does not use any virtual functions to avoid the
@@ -849,6 +840,8 @@
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
+  inline bool IsFixedArrayBase();
+
   // Returns true if this object is an instance of the specified
   // function template.
   inline bool IsInstanceOf(FunctionTemplateInfo* type);
@@ -859,6 +852,7 @@
 #undef DECLARE_STRUCT_PREDICATE
 
   INLINE(bool IsSpecObject());
+  INLINE(bool IsSpecFunction());
 
   // Oddball testing.
   INLINE(bool IsUndefined());
@@ -867,6 +861,10 @@
   INLINE(bool IsTrue());
   INLINE(bool IsFalse());
   inline bool IsArgumentsMarker();
+  inline bool NonFailureIsHeapObject();
+
+  // Filler objects (fillers and free space objects).
+  inline bool IsFiller();
 
   // Extract the number.
   inline double Number();
@@ -899,20 +897,22 @@
       Object* receiver,
       String* key,
       PropertyAttributes* attributes);
+
+  static Handle<Object> GetProperty(Handle<Object> object,
+                                    Handle<Object> receiver,
+                                    LookupResult* result,
+                                    Handle<String> key,
+                                    PropertyAttributes* attributes);
+
   MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
                                            LookupResult* result,
                                            String* key,
                                            PropertyAttributes* attributes);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
-                                                       Object* structure,
-                                                       String* name,
-                                                       Object* holder);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
-                                                      String* name,
-                                                      Object* handler);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
-                                                            JSFunction* getter);
 
+  MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
+                                                            JSReceiver* getter);
+
+  static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
   inline MaybeObject* GetElement(uint32_t index);
   // For use when we know that no exception can be thrown.
   inline Object* GetElementNoExceptionThrown(uint32_t index);
@@ -921,6 +921,16 @@
   // Return the object's prototype (might be Heap::null_value()).
   Object* GetPrototype();
 
+  // Returns the permanent hash code associated with this object depending on
+  // the actual object type.  May return a failure if no hash was created
+  // yet, or if a GC was triggered while trying to create one.
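+  // A minimal usage sketch (assuming CreationFlag's ALLOW_CREATION value):
+  //   MaybeObject* maybe = obj->GetHash(ALLOW_CREATION);
+  //   Object* hash;
+  //   if (!maybe->ToObject(&hash)) return maybe;  // GC needed; retry later.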
+  MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
+
+  // Checks whether this object has the same value as the given one.  This
+  // function is implemented according to ES5, section 9.12 and can be used
+  // to implement the Harmony "egal" function.
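+  // Unlike the == operator, SameValue treats NaN as equal to itself and
+  // distinguishes +0 from -0, e.g. (an illustration of the spec, not code):
+  //   nan->SameValue(nan)              // true
+  //   plus_zero->SameValue(minus_zero) // false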
+  bool SameValue(Object* other);
+
   // Tries to convert an object to an array index.  Returns true and sets
   // the output parameter if it succeeds.
   inline bool ToArrayIndex(uint32_t* index);
@@ -986,7 +996,8 @@
   void SmiVerify();
 #endif
 
-  static const int kMinValue = (-1 << (kSmiValueSize - 1));
+  static const int kMinValue =
+      (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
   static const int kMaxValue = -(kMinValue + 1);
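+  // For example, with kSmiValueSize == 31 (typical 32-bit targets) this
+  // yields kMinValue == -2^30 == -1073741824 and kMaxValue == 2^30 - 1.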
 
  private:
@@ -1095,101 +1106,13 @@
   // View this map word as a forwarding address.
   inline HeapObject* ToForwardingAddress();
 
-  // Marking phase of full collection: the map word of live objects is
-  // marked, and may be marked as overflowed (eg, the object is live, its
-  // children have not been visited, and it does not fit in the marking
-  // stack).
+  static inline MapWord FromRawValue(uintptr_t value) {
+    return MapWord(value);
+  }
 
-  // True if this map word's mark bit is set.
-  inline bool IsMarked();
-
-  // Return this map word but with its mark bit set.
-  inline void SetMark();
-
-  // Return this map word but with its mark bit cleared.
-  inline void ClearMark();
-
-  // True if this map word's overflow bit is set.
-  inline bool IsOverflowed();
-
-  // Return this map word but with its overflow bit set.
-  inline void SetOverflow();
-
-  // Return this map word but with its overflow bit cleared.
-  inline void ClearOverflow();
-
-
-  // Compacting phase of a full compacting collection: the map word of live
-  // objects contains an encoding of the original map address along with the
-  // forwarding address (represented as an offset from the first live object
-  // in the same page as the (old) object address).
-
-  // Create a map word from a map address and a forwarding address offset.
-  static inline MapWord EncodeAddress(Address map_address, int offset);
-
-  // Return the map address encoded in this map word.
-  inline Address DecodeMapAddress(MapSpace* map_space);
-
-  // Return the forwarding offset encoded in this map word.
-  inline int DecodeOffset();
-
-
-  // During serialization: the map word is used to hold an encoded
-  // address, and possibly a mark bit (set and cleared with SetMark
-  // and ClearMark).
-
-  // Create a map word from an encoded address.
-  static inline MapWord FromEncodedAddress(Address address);
-
-  inline Address ToEncodedAddress();
-
-  // Bits used by the marking phase of the garbage collector.
-  //
-  // The first word of a heap object is normally a map pointer. The last two
-  // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
-  // mark an object as live and/or overflowed:
-  //   last bit = 0, marked as alive
-  //   second bit = 1, overflowed
-  // An object is only marked as overflowed when it is marked as live while
-  // the marking stack is overflowed.
-  static const int kMarkingBit = 0;  // marking bit
-  static const int kMarkingMask = (1 << kMarkingBit);  // marking mask
-  static const int kOverflowBit = 1;  // overflow bit
-  static const int kOverflowMask = (1 << kOverflowBit);  // overflow mask
-
-  // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
-  // used.
-  // +-----------------+------------------+-----------------+
-  // |forwarding offset|page offset of map|page index of map|
-  // +-----------------+------------------+-----------------+
-  //          ^                 ^                  ^
-  //          |                 |                  |
-  //          |                 |          kMapPageIndexBits
-  //          |         kMapPageOffsetBits
-  // kForwardingOffsetBits
-  static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
-  static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
-#ifdef V8_HOST_ARCH_64_BIT
-  static const int kMapPageIndexBits = 16;
-#else
-  // Use all the 32-bits to encode on a 32-bit platform.
-  static const int kMapPageIndexBits =
-      32 - (kMapPageOffsetBits + kForwardingOffsetBits);
-#endif
-
-  static const int kMapPageIndexShift = 0;
-  static const int kMapPageOffsetShift =
-      kMapPageIndexShift + kMapPageIndexBits;
-  static const int kForwardingOffsetShift =
-      kMapPageOffsetShift + kMapPageOffsetBits;
-
-  // Bit masks covering the different parts the encoding.
-  static const uintptr_t kMapPageIndexMask =
-      (1 << kMapPageOffsetShift) - 1;
-  static const uintptr_t kMapPageOffsetMask =
-      ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
-  static const uintptr_t kForwardingOffsetMask =
-      ~(kMapPageIndexMask | kMapPageOffsetMask);
+  inline uintptr_t ToRawValue() {
+    return value_;
+  }
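+
+  // FromRawValue and ToRawValue are exact inverses; a sketch of the
+  // intended round-trip (hypothetical usage, not part of this header):
+  //   uintptr_t raw = object->map_word().ToRawValue();
+  //   object->set_map_word(MapWord::FromRawValue(raw));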
 
  private:
   // HeapObject calls the private constructor and directly reads the value.
@@ -1209,6 +1132,7 @@
   // information.
   inline Map* map();
   inline void set_map(Map* value);
+  inline void set_map_unsafe(Map* value);
 
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
@@ -1216,8 +1140,8 @@
   inline void set_map_word(MapWord map_word);
 
   // The Heap the object was allocated in. Used also to access Isolate.
-  // This method can not be used during GC, it ASSERTs this.
   inline Heap* GetHeap();
+
   // Convenience method to get current isolate. This method can be
   // accessed only when its result is the same as
   // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
@@ -1246,31 +1170,6 @@
   // GC internal.
   inline int SizeFromMap(Map* map);
 
-  // Support for the marking heap objects during the marking phase of GC.
-  // True if the object is marked live.
-  inline bool IsMarked();
-
-  // Mutate this object's map pointer to indicate that the object is live.
-  inline void SetMark();
-
-  // Mutate this object's map pointer to remove the indication that the
-  // object is live (ie, partially restore the map pointer).
-  inline void ClearMark();
-
-  // True if this object is marked as overflowed.  Overflowed objects have
-  // been reached and marked during marking of the heap, but their children
-  // have not necessarily been marked and they have not been pushed on the
-  // marking stack.
-  inline bool IsOverflowed();
-
-  // Mutate this object's map pointer to indicate that the object is
-  // overflowed.
-  inline void SetOverflow();
-
-  // Mutate this object's map pointer to remove the indication that the
-  // object is overflowed (ie, partially restore the map pointer).
-  inline void ClearOverflow();
-
   // Returns the field at offset in obj, as a read/write Object* reference.
   // Does no checking, and is safe to use during GC, while maps are invalid.
   // Does not invoke write barrier, so should only be assigned to
@@ -1294,18 +1193,14 @@
     HeapObjectPrint(stdout);
   }
   void HeapObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void HeapObjectVerify();
-  inline void VerifyObjectField(int offset);
-  inline void VerifySmiField(int offset);
-#endif
-
-#ifdef OBJECT_PRINT
   void PrintHeader(FILE* out, const char* id);
 #endif
 
 #ifdef DEBUG
+  void HeapObjectVerify();
+  inline void VerifyObjectField(int offset);
+  inline void VerifySmiField(int offset);
+
   // Verify a pointer is a valid HeapObject pointer that points to object
   // areas in the heap.
   static void VerifyHeapPointer(Object* p);
@@ -1448,8 +1343,21 @@
                                            Object* value,
                                            PropertyAttributes attributes,
                                            StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
+                                                            Object* value);
 
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+  MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+
+  // Set the index'th array element.
+  // Can cause GC, or return failure if GC is required.
+  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
+                                          Object* value,
+                                          StrictModeFlag strict_mode,
+                                          bool check_prototype);
+
+  // Tests for the fast common case for property enumeration.
+  bool IsSimpleEnum();
 
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
@@ -1466,6 +1374,7 @@
   // Can cause a GC.
   inline bool HasProperty(String* name);
   inline bool HasLocalProperty(String* name);
+  inline bool HasElement(uint32_t index);
 
   // Return the object's prototype (might be Heap::null_value()).
   inline Object* GetPrototype();
@@ -1474,11 +1383,18 @@
   MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
                                             bool skip_hidden_prototypes);
 
+  // Retrieves a permanent object identity hash code. The undefined value might
+  // be returned in case no hash was created yet and OMIT_CREATION was used.
+  inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
   // Lookup a property.  If found, the result is valid and has
   // detailed information.
   void LocalLookup(String* name, LookupResult* result);
   void Lookup(String* name, LookupResult* result);
 
+ protected:
+  Smi* GenerateIdentityHash();
+
  private:
   PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
                                           LookupResult* result,
@@ -1525,8 +1441,14 @@
   MUST_USE_RESULT inline MaybeObject* ResetElements();
   inline ElementsKind GetElementsKind();
   inline ElementsAccessor* GetElementsAccessor();
+  inline bool HasFastSmiOnlyElements();
   inline bool HasFastElements();
+  // Returns whether an object has either FAST_ELEMENTS or
+  // FAST_SMI_ONLY_ELEMENTS elements.  TODO(danno): Rename HasFastTypeElements
+  // to HasFastElements() and HasFastElements to HasFastObjectElements.
+  inline bool HasFastTypeElements();
   inline bool HasFastDoubleElements();
+  inline bool HasNonStrictArgumentsElements();
   inline bool HasDictionaryElements();
   inline bool HasExternalPixelElements();
   inline bool HasExternalArrayElements();
@@ -1541,7 +1463,7 @@
   bool HasFastArgumentsElements();
   bool HasDictionaryArgumentsElements();
   inline bool AllowsSetElementsLength();
-  inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
+  inline NumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Requires: HasFastElements().
   MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
@@ -1554,6 +1476,11 @@
   // a dictionary, and it will stay a dictionary.
   MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
 
+  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
+                                                       Object* structure,
+                                                       String* name);
+
+  // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
                                            String* key,
                                            Object* value,
@@ -1571,8 +1498,6 @@
       Object* value,
       JSObject* holder,
       StrictModeFlag strict_mode);
-  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
-                                                            Object* value);
   MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
       String* name,
       Object* value,
@@ -1660,43 +1585,44 @@
   // Accessors for hidden properties object.
   //
   // Hidden properties are not local properties of the object itself.
-  // Instead they are stored on an auxiliary JSObject stored as a local
+  // Instead they are stored in an auxiliary structure kept as a local
   // property with a special name Heap::hidden_symbol(). But if the
   // receiver is a JSGlobalProxy then the auxiliary object is a property
-  // of its prototype.
-  //
-  // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
-  // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
-  // holder.
-  //
-  // These accessors do not touch interceptors or accessors.
-  inline bool HasHiddenPropertiesObject();
-  inline Object* GetHiddenPropertiesObject();
-  MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
-      Object* hidden_obj);
+  // of its prototype, and if it's a detached proxy, then you can't have
+  // hidden properties.
 
-  // Indicates whether the hidden properties object should be created.
-  enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
+  // Sets a hidden property on this object. Returns this object if successful,
+  // undefined if called on a detached proxy, and a failure if a GC
+  // is required.
+  MaybeObject* SetHiddenProperty(String* key, Object* value);
+  // Gets the value of a hidden property with the given key. Returns undefined
+  // if the property doesn't exist (or if called on a detached proxy),
+  // otherwise returns the value set for the key.
+  Object* GetHiddenProperty(String* key);
+  // Deletes a hidden property. Deleting a non-existing property is
+  // considered successful.
+  void DeleteHiddenProperty(String* key);
+  // Returns true if the object has a property with the hidden symbol as name.
+  bool HasHiddenProperties();
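+
+  // A sketch of the expected round-trip (hypothetical usage):
+  //   if (obj->SetHiddenProperty(key, value)->IsFailure()) return ...;
+  //   ASSERT(obj->GetHiddenProperty(key) == value);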
 
-  // Retrieves the hidden properties object.
-  //
-  // The undefined value might be returned in case no hidden properties object
-  // is present and creation was omitted.
-  inline bool HasHiddenProperties();
-  MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
-
-  // Retrieves a permanent object identity hash code.
-  //
-  // The identity hash is stored as a hidden property. The undefined value might
-  // be returned in case no hidden properties object is present and creation was
-  // omitted.
-  MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+  MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
 
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
 
-  // Tests for the fast common case for property enumeration.
-  bool IsSimpleEnum();
+  inline void ValidateSmiOnlyElements();
+
+  // Makes sure that this object can contain non-smi Object as elements.
+  inline MaybeObject* EnsureCanContainNonSmiElements();
+
+  // Makes sure that this object can contain the specified elements.
+  inline MaybeObject* EnsureCanContainElements(Object** elements,
+                                               uint32_t count);
+  inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
+  MaybeObject* EnsureCanContainElements(Arguments* arguments,
+                                        uint32_t first_arg,
+                                        uint32_t arg_count);
 
   // Do we want to keep the elements in fast case when increasing the
   // capacity?
@@ -1711,7 +1637,6 @@
   bool CanConvertToFastDoubleElements();
 
   // Tells whether the index'th element is present.
-  inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
 
   // Computes the new capacity when expanding the elements of a JSObject.
@@ -1747,6 +1672,7 @@
                                               Object* value,
                                               StrictModeFlag strict_mode,
                                               bool check_prototype);
+
   MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
                                                     Object* value,
                                                     StrictModeFlag strict_mode,
@@ -1769,15 +1695,21 @@
   // The undefined object if index is out of bounds.
   MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
 
+  enum SetFastElementsCapacityMode {
+    kAllowSmiOnlyElements,
+    kDontAllowSmiOnlyElements
+  };
+
   // Replace the elements' backing store with fast elements of the given
   // capacity.  Update the length for JSArrays.  Returns the new backing
   // store.
-  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
-                                                                int length);
+  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+      int capacity,
+      int length,
+      SetFastElementsCapacityMode set_capacity_mode);
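+  // For example (a sketch): growing the backing store while keeping a
+  // smi-only elements kind legal would pass kAllowSmiOnlyElements:
+  //   obj->SetFastElementsCapacityAndLength(new_capacity, length,
+  //                                         kAllowSmiOnlyElements);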
   MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
       int capacity,
       int length);
-  MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
 
   // Lookup interceptors are used for handling properties controlled by host
   // objects.
@@ -1800,10 +1732,7 @@
   inline int GetInternalFieldOffset(int index);
   inline Object* GetInternalField(int index);
   inline void SetInternalField(int index, Object* value);
-
-  // Lookup a property.  If found, the result is valid and has
-  // detailed information.
-  void LocalLookup(String* name, LookupResult* result);
+  inline void SetInternalField(int index, Smi* value);
 
   // The following lookup functions skip interceptors.
   void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1860,6 +1789,15 @@
       Object* value,
       PropertyAttributes attributes);
 
+  // Returns a new map with all transitions dropped from the object's current
+  // map and the ElementsKind set.
+  static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+                                              ElementsKind to_kind);
+  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+      ElementsKind elements_kind);
+
+  MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
+
   // Converts a descriptor of any other type to a real field,
   // backed by the properties array.  Descriptors of visible
   // types, such as CONSTANT_FUNCTION, keep their enumeration order.
@@ -1902,8 +1840,14 @@
       PropertyNormalizationMode mode,
       int expected_additional_properties);
 
+  // Converts and updates the elements backing store to a NumberDictionary.
+  // Returns the backing store after conversion.
   MUST_USE_RESULT MaybeObject* NormalizeElements();
 
+  static void UpdateMapCodeCache(Handle<JSObject> object,
+                                 Handle<String> name,
+                                 Handle<Code> code);
+
   MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
 
   // Transform slow named properties to fast variants.
@@ -1923,11 +1867,14 @@
                                        WriteBarrierMode mode
                                        = UPDATE_WRITE_BARRIER);
 
-  // initializes the body after properties slot, properties slot is
-  // initialized by set_properties
-  // Note: this call does not update write barrier, it is caller's
-  // reponsibility to ensure that *v* can be collected without WB here.
-  inline void InitializeBody(int object_size, Object* value);
+  // Initializes the body after properties slot, properties slot is
+  // initialized by set_properties.  Fill the pre-allocated fields with
+  // pre_allocated_value and the rest with filler_value.
+  // Note: this call does not update write barrier, the caller is responsible
+  // to ensure that |filler_value| can be collected without WB here.
+  inline void InitializeBody(Map* map,
+                             Object* pre_allocated_value,
+                             Object* filler_value);
 
   // Check whether this object references another object
   bool ReferencesObject(Object* obj);
@@ -1962,6 +1909,10 @@
   void PrintElements(FILE* out);
 #endif
 
+  void PrintElementsTransition(
+      FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
+      ElementsKind to_kind, FixedArrayBase* to_elements);
+
 #ifdef DEBUG
   // Structure for collecting spill information about JSObjects.
   class SpillInformation {
@@ -1985,6 +1936,11 @@
 #endif
   Object* SlowReverseLookup(Object* value);
 
+  // Getters and setters are stored in a fixed array property.
+  // These are constants for their indices.
+  static const int kGetterIndex = 0;
+  static const int kSetterIndex = 1;
+
   // Maximal number of fast properties for the JSObject. Used to
   // restrict the number of map transitions to avoid an explosion in
   // the number of maps for objects used as dictionaries.
@@ -2052,6 +2008,18 @@
       StrictModeFlag strict_mode,
       bool check_prototype);
 
+  // Searches the prototype chain for a callback setter and sets the property
+  // with the setter if it finds one. The '*found' flag indicates whether
+  // a setter was found or not.
+  // This function can cause GC and can return a failure result with
+  // '*found==true'.
+  MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      bool* found,
+      StrictModeFlag strict_mode);
+
   MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
                                                              DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
@@ -2090,6 +2058,15 @@
 
   void LookupInDescriptor(String* name, LookupResult* result);
 
+  // Returns the hidden properties backing store object, currently
+  // a StringDictionary, stored on this object.
+  // If no hidden properties object has been put on this object,
+  // return undefined, unless create_if_absent is true, in which case
+  // a new dictionary is created, added to this object, and returned.
+  MaybeObject* GetHiddenPropertiesDictionary(bool create_if_absent);
+  // Updates the existing hidden properties dictionary.
+  MaybeObject* SetHiddenPropertiesDictionary(StringDictionary* dictionary);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
 
@@ -2207,7 +2184,9 @@
  protected:
   // Set operation on FixedArray without using write barriers. Can
   // only be used for storing old space objects or smis.
-  static inline void fast_set(FixedArray* array, int index, Object* value);
+  static inline void NoWriteBarrierSet(FixedArray* array,
+                                       int index,
+                                       Object* value);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
@@ -2219,7 +2198,7 @@
  public:
   inline void Initialize(FixedArray* from);
   inline void Initialize(FixedDoubleArray* from);
-  inline void Initialize(SeededNumberDictionary* from);
+  inline void Initialize(NumberDictionary* from);
 
   // Setter and getter for elements.
   inline double get_scalar(int index);
@@ -2230,6 +2209,9 @@
   // Checking for the hole.
   inline bool is_the_hole(int index);
 
+  // Copy operations
+  MUST_USE_RESULT inline MaybeObject* Copy();
+
   // Garbage collection support.
   inline static int SizeFor(int length) {
     return kHeaderSize + length * kDoubleSize;
@@ -2269,6 +2251,9 @@
 };
 
 
+class IncrementalMarking;
+
+
 // DescriptorArrays are fixed arrays used to hold instance descriptors.
 // The format of the these objects is:
 // TODO(1399): It should be possible to make room for bit_field3 in the map
@@ -2310,7 +2295,7 @@
   // Set next enumeration index and flush any enum cache.
   void SetNextEnumerationIndex(int value) {
     if (!IsEmpty()) {
-      fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
+      set(kEnumerationIndexIndex, Smi::FromInt(value));
     }
   }
   bool HasEnumCache() {
@@ -2347,13 +2332,27 @@
   inline bool IsNullDescriptor(int descriptor_number);
   inline bool IsDontEnum(int descriptor_number);
 
+  class WhitenessWitness {
+   public:
+    inline explicit WhitenessWitness(DescriptorArray* array);
+    inline ~WhitenessWitness();
+
+   private:
+    IncrementalMarking* marking_;
+  };
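+
+  // Callers create one witness and thread it through the Set/CopyFrom/Sort
+  // calls below, proving the array is still unmarked ("white") to the
+  // incremental marker, e.g. (hypothetical usage):
+  //   DescriptorArray::WhitenessWitness witness(descriptors);
+  //   descriptors->Set(i, &desc, witness);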
+
   // Accessor for complete descriptor.
   inline void Get(int descriptor_number, Descriptor* desc);
-  inline void Set(int descriptor_number, Descriptor* desc);
+  inline void Set(int descriptor_number,
+                  Descriptor* desc,
+                  const WhitenessWitness&);
 
   // Transfer complete descriptor from another descriptor array to
   // this one.
-  inline void CopyFrom(int index, DescriptorArray* src, int src_index);
+  inline void CopyFrom(int index,
+                       DescriptorArray* src,
+                       int src_index,
+                       const WhitenessWitness&);
 
   // Copy the descriptor array, insert a new descriptor and optionally
   // remove map transitions.  If the descriptor is already present, it is
@@ -2370,11 +2369,11 @@
 
   // Sort the instance descriptors by the hash codes of their keys.
   // Does not check for duplicates.
-  void SortUnchecked();
+  void SortUnchecked(const WhitenessWitness&);
 
   // Sort the instance descriptors by the hash codes of their keys.
   // Checks the result for duplicates.
-  void Sort();
+  void Sort(const WhitenessWitness&);
 
   // Search the instance descriptors for given name.
   inline int Search(String* name);
@@ -2467,10 +2466,12 @@
         NULL_DESCRIPTOR;
   }
   // Swap operation on FixedArray without using write barriers.
-  static inline void fast_swap(FixedArray* array, int first, int second);
+  static inline void NoWriteBarrierSwap(FixedArray* array,
+                                        int first,
+                                        int second);
 
   // Swap descriptor first and second.
-  inline void Swap(int first, int second);
+  inline void NoWriteBarrierSwapDescriptors(int first, int second);
 
   FixedArray* GetContentArray() {
     return FixedArray::cast(get(kContentArrayIndex));
@@ -2488,7 +2489,7 @@
 // encountered and stops when unused elements are encountered.
 //
 // - Elements with key == undefined have not been used yet.
-// - Elements with key == null have been deleted.
+// - Elements with key == the_hole have been deleted.
 //
 // The hash table class is parameterized with a Shape and a Key.
 // Shape must be a class with the following interface:
@@ -2512,42 +2513,9 @@
 // beginning of the backing storage that can be used for non-element
 // information by subclasses.
 
-template<typename Key>
-class BaseShape {
- public:
-  static const bool UsesSeed = false;
-  static uint32_t Hash(Key key) { return 0; }
-  static uint32_t SeededHash(Key key, uint32_t seed) {
-    ASSERT(UsesSeed);
-    return Hash(key);
-  }
-  static uint32_t HashForObject(Key key, Object* object) { return 0; }
-  static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
-    // Won't be called if UsesSeed isn't overridden by child class.
-    return HashForObject(key, object);
-  }
-};
-
 template<typename Shape, typename Key>
 class HashTable: public FixedArray {
  public:
-  // Wrapper methods
-  inline uint32_t Hash(Key key) {
-    if (Shape::UsesSeed) {
-      return Shape::SeededHash(key, GetHeap()->HashSeed());
-    } else {
-      return Shape::Hash(key);
-    }
-  }
-
-  inline uint32_t HashForObject(Key key, Object* object) {
-    if (Shape::UsesSeed) {
-      return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
-    } else {
-      return Shape::HashForObject(key, object);
-    }
-  }
-
   // Returns the number of elements in the hash table.
   int NumberOfElements() {
     return Smi::cast(get(kNumberOfElementsIndex))->value();
@@ -2590,10 +2558,10 @@
   // Returns the key at entry.
   Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
 
-  // Tells whether k is a real key.  Null and undefined are not allowed
+  // Tells whether k is a real key.  The hole and undefined are not allowed
   // as keys and can be used to indicate missing or deleted elements.
   bool IsKey(Object* k) {
-    return !k->IsNull() && !k->IsUndefined();
+    return !k->IsTheHole() && !k->IsUndefined();
   }
 
   // Garbage collection support.
@@ -2645,12 +2613,12 @@
 
   // Update the number of elements in the hash table.
   void SetNumberOfElements(int nof) {
-    fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
+    set(kNumberOfElementsIndex, Smi::FromInt(nof));
   }
 
   // Update the number of deleted elements in the hash table.
   void SetNumberOfDeletedElements(int nod) {
-    fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+    set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
   }
 
   // Sets the capacity of the hash table.
@@ -2660,7 +2628,7 @@
     // and non-zero.
     ASSERT(capacity > 0);
     ASSERT(capacity <= kMaxCapacity);
-    fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
+    set(kCapacityIndex, Smi::FromInt(capacity));
   }
 
 
@@ -2689,6 +2657,7 @@
 };
 
 
+
 // HashTableKey is an abstract superclass for virtual key behavior.
 class HashTableKey {
  public:
@@ -2705,8 +2674,7 @@
   virtual ~HashTableKey() {}
 };
 
-
-class SymbolTableShape : public BaseShape<HashTableKey*> {
+class SymbolTableShape {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -2765,7 +2733,7 @@
 };
 
 
-class MapCacheShape : public BaseShape<HashTableKey*> {
+class MapCacheShape {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -2868,7 +2836,7 @@
 
   // Accessors for next enumeration index.
   void SetNextEnumerationIndex(int index) {
-    this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
+    this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
   }
 
   int NextEnumerationIndex() {
@@ -2921,7 +2889,7 @@
 };
 
 
-class StringDictionaryShape : public BaseShape<String*> {
+class StringDictionaryShape {
  public:
   static inline bool IsMatch(String* key, Object* other);
   static inline uint32_t Hash(String* key);
@@ -2948,48 +2916,29 @@
       JSObject* obj,
       int unused_property_fields);
 
-  // Find entry for key otherwise return kNotFound. Optimzed version of
+  // Find entry for key, otherwise return kNotFound. Optimized version of
   // HashTable::FindEntry.
   int FindEntry(String* key);
 };
 
 
-class NumberDictionaryShape : public BaseShape<uint32_t> {
+class NumberDictionaryShape {
  public:
   static inline bool IsMatch(uint32_t key, Object* other);
+  static inline uint32_t Hash(uint32_t key);
+  static inline uint32_t HashForObject(uint32_t key, Object* object);
   MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key);
+  static const int kPrefixSize = 2;
   static const int kEntrySize = 3;
   static const bool kIsEnumerable = false;
 };
 
 
-class SeededNumberDictionaryShape : public NumberDictionaryShape {
+class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
  public:
-  static const bool UsesSeed = true;
-  static const int kPrefixSize = 2;
-
-  static inline uint32_t SeededHash(uint32_t key, uint32_t seed);
-  static inline uint32_t SeededHashForObject(uint32_t key,
-                                             uint32_t seed,
-                                             Object* object);
-};
-
-
-class UnseededNumberDictionaryShape : public NumberDictionaryShape {
- public:
-  static const int kPrefixSize = 0;
-
-  static inline uint32_t Hash(uint32_t key);
-  static inline uint32_t HashForObject(uint32_t key, Object* object);
-};
-
-
-class SeededNumberDictionary
-    : public Dictionary<SeededNumberDictionaryShape, uint32_t> {
- public:
-  static SeededNumberDictionary* cast(Object* obj) {
+  static NumberDictionary* cast(Object* obj) {
     ASSERT(obj->IsDictionary());
-    return reinterpret_cast<SeededNumberDictionary*>(obj);
+    return reinterpret_cast<NumberDictionary*>(obj);
   }
 
   // Type specific at put (default NONE attributes is used when adding).
@@ -3018,9 +2967,6 @@
   // requires_slow_elements returns false.
   inline uint32_t max_number_key();
 
-  // Remove all entries were key is a number and (from <= key && key < to).
-  void RemoveNumberEntries(uint32_t from, uint32_t to);
-
   // Bit masks.
   static const int kRequiresSlowElementsMask = 1;
   static const int kRequiresSlowElementsTagSize = 1;
@@ -3028,37 +2974,41 @@
 };
 
 
-class UnseededNumberDictionary
-    : public Dictionary<UnseededNumberDictionaryShape, uint32_t> {
+template <int entrysize>
+class ObjectHashTableShape {
  public:
-  static UnseededNumberDictionary* cast(Object* obj) {
-    ASSERT(obj->IsDictionary());
-    return reinterpret_cast<UnseededNumberDictionary*>(obj);
+  static inline bool IsMatch(Object* key, Object* other);
+  static inline uint32_t Hash(Object* key);
+  static inline uint32_t HashForObject(Object* key, Object* object);
+  MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key);
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = entrysize;
+};
+
+
+// ObjectHashSet holds keys that are arbitrary objects by using the identity
+// hash of the key for hashing purposes.
+class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
+ public:
+  static inline ObjectHashSet* cast(Object* obj) {
+    ASSERT(obj->IsHashTable());
+    return reinterpret_cast<ObjectHashSet*>(obj);
   }
 
-  // Type specific at put (default NONE attributes is used when adding).
-  MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
-  MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
+  // Looks up whether the given key is part of this hash set.
+  bool Contains(Object* key);
 
-  // Set an existing entry or add a new one if needed.
-  MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
+  // Adds the given key to this hash set.
+  MUST_USE_RESULT MaybeObject* Add(Object* key);
+
+  // Removes the given key from this hash set.
+  MUST_USE_RESULT MaybeObject* Remove(Object* key);
 };
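+
+// A minimal ObjectHashSet usage sketch (hypothetical; note that Add may
+// reallocate the table, so its result must replace the old reference):
+//   MaybeObject* maybe = set->Add(key);
+//   Object* obj;
+//   if (!maybe->ToObject(&obj)) return maybe;  // Allocation failure.
+//   set = ObjectHashSet::cast(obj);
+//   ASSERT(set->Contains(key));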
 
 
-class ObjectHashTableShape : public BaseShape<Object*> {
- public:
-  static inline bool IsMatch(JSObject* key, Object* other);
-  static inline uint32_t Hash(JSObject* key);
-  static inline uint32_t HashForObject(JSObject* key, Object* object);
-  MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
-  static const int kPrefixSize = 0;
-  static const int kEntrySize = 2;
-};
-
-
-// ObjectHashTable maps keys that are JavaScript objects to object values by
+// ObjectHashTable maps keys that are arbitrary objects to object values by
 // using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
+class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
  public:
   static inline ObjectHashTable* cast(Object* obj) {
     ASSERT(obj->IsHashTable());
@@ -3067,18 +3017,17 @@
 
   // Looks up the value associated with the given key. The undefined value is
   // returned in case the key is not present.
-  Object* Lookup(JSObject* key);
+  Object* Lookup(Object* key);
 
   // Adds (or overwrites) the value associated with the given key. Mapping a
   // key to the undefined value causes removal of the whole entry.
-  MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
+  MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
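+
+  // Sketch (hypothetical): because undefined values denote deleted entries,
+  //   table->Put(key, heap->undefined_value());
+  // removes |key|, and a later Lookup(key) again returns undefined.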
 
  private:
   friend class MarkCompactCollector;
 
-  void AddEntry(int entry, JSObject* key, Object* value);
-  void RemoveEntry(int entry, Heap* heap);
-  inline void RemoveEntry(int entry);
+  void AddEntry(int entry, Object* key, Object* value);
+  void RemoveEntry(int entry);
 
   // Returns the index to the value of an entry.
   static inline int EntryToValueIndex(int entry) {
@@ -3125,6 +3074,207 @@
 };
 
 
+// ScopeInfo represents information about different scopes of a source
+// program and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in ScopeInfo objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+
+// This object provides quick access to scope info details for runtime
+// routines.
+class ScopeInfo : public FixedArray {
+ public:
+  static inline ScopeInfo* cast(Object* object);
+
+  // Return the type of this scope.
+  ScopeType Type();
+
+  // Does this scope call eval?
+  bool CallsEval();
+
+  // Return the language mode of this scope.
+  LanguageMode language_mode();
+
+  // Does this scope make a non-strict eval call?
+  bool CallsNonStrictEval() {
+    return CallsEval() && (language_mode() == CLASSIC_MODE);
+  }
+
+  // Return the total number of locals allocated on the stack and in the
+  // context. This includes the parameters that are allocated in the context.
+  int LocalCount();
+
+  // Return the number of stack slots for code. This number consists of two
+  // parts:
+  //  1. One stack slot per stack allocated local.
+  //  2. One stack slot for the function name if it is stack allocated.
+  int StackSlotCount();
+
+  // Return the number of context slots for code if a context is allocated. This
+  // number consists of three parts:
+  //  1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
+  //  2. One context slot per context allocated local.
+  //  3. One context slot for the function name if it is context allocated.
+  // Parameters allocated in the context count as context allocated locals. If
+  // no contexts are allocated for this scope ContextLength returns 0.
+  int ContextLength();
+
+  // Is this scope the scope of a named function expression?
+  bool HasFunctionName();
+
+  // Return whether this scope has context-allocated locals.
+  bool HasHeapAllocatedLocals();
+
+  // Return whether a context is allocated for this scope.
+  bool HasContext();
+
+  // Return the function_name if present.
+  String* FunctionName();
+
+  // Return the name of the given parameter.
+  String* ParameterName(int var);
+
+  // Return the name of the given local.
+  String* LocalName(int var);
+
+  // Return the name of the given stack local.
+  String* StackLocalName(int var);
+
+  // Return the name of the given context local.
+  String* ContextLocalName(int var);
+
+  // Return the mode of the given context local.
+  VariableMode ContextLocalMode(int var);
+
+  // Return the initialization flag of the given context local.
+  InitializationFlag ContextLocalInitFlag(int var);
+
+  // Lookup support for serialized scope info. Returns the stack slot index
+  // for a given slot name if the slot is present; otherwise returns a
+  // value < 0. The name must be a symbol (canonicalized).
+  int StackSlotIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the
+  // context slot index for a given slot name if the slot is present; otherwise
+  // returns a value < 0. The name must be a symbol (canonicalized).
+  // If the slot is present and mode != NULL, sets *mode to the corresponding
+  // mode for that variable.
+  int ContextSlotIndex(String* name,
+                       VariableMode* mode,
+                       InitializationFlag* init_flag);
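+
+  // A sketch of the intended lookup pattern (hypothetical usage):
+  //   VariableMode mode;
+  //   InitializationFlag init_flag;
+  //   int slot = scope_info->ContextSlotIndex(name, &mode, &init_flag);
+  //   if (slot >= 0) { /* variable lives in the context at index |slot| */ }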
+
+  // Lookup support for serialized scope info. Returns the
+  // parameter index for a given parameter name if the parameter is present;
+  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+  int ParameterIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the
+  // function context slot index if the function name is present (named
+  // function expressions, only), otherwise returns a value < 0. The name
+  // must be a symbol (canonicalized).
+  int FunctionContextSlotIndex(String* name, VariableMode* mode);
+
+  static Handle<ScopeInfo> Create(Scope* scope);
+
+  // Serializes empty scope info.
+  static ScopeInfo* Empty();
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+  // The layout of the static part of a ScopeInfo is as follows. Each entry is
+  // numeric and occupies one array slot.
+  // 1. A set of properties of the scope
+  // 2. The number of parameters. This only applies to function scopes. For
+  //    non-function scopes this is 0.
+  // 3. The number of non-parameter variables allocated on the stack.
+  // 4. The number of non-parameter and parameter variables allocated in the
+  //    context.
+#define FOR_EACH_NUMERIC_FIELD(V)          \
+  V(Flags)                                 \
+  V(ParameterCount)                        \
+  V(StackLocalCount)                       \
+  V(ContextLocalCount)
+
+#define FIELD_ACCESSORS(name)                            \
+  void Set##name(int value) {                            \
+    set(k##name, Smi::FromInt(value));                   \
+  }                                                      \
+  int name() {                                           \
+    if (length() > 0) {                                  \
+      return Smi::cast(get(k##name))->value();           \
+    } else {                                             \
+      return 0;                                          \
+    }                                                    \
+  }
+  FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
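+
+  // For instance, FIELD_ACCESSORS(ParameterCount) expands to the pair
+  // SetParameterCount(int)/ParameterCount() operating on the kParameterCount
+  // slot; shown here only as an illustration of the macro above:
+  //   void SetParameterCount(int value) {
+  //     set(kParameterCount, Smi::FromInt(value));
+  //   }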
+
+ private:
+  enum {
+#define DECL_INDEX(name) k##name,
+  FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
+#undef DECL_INDEX
+#undef FOR_EACH_NUMERIC_FIELD
+  kVariablePartIndex
+  };
+
+  // The layout of the variable part of a ScopeInfo is as follows:
+  // 1. ParameterEntries:
+  //    This part stores the names of the parameters for function scopes. One
+  //    slot is used per parameter, so in total this part occupies
+  //    ParameterCount() slots in the array. For other scopes than function
+  //    scopes ParameterCount() is 0.
+  // 2. StackLocalEntries:
+  //    Contains the names of local variables that are allocated on the stack,
+  //    in increasing order of the stack slot index. One slot is used per stack
+  //    local, so in total this part occupies StackLocalCount() slots in the
+  //    array.
+  // 3. ContextLocalNameEntries:
+  //    Contains the names of local variables and parameters that are allocated
+  //    in the context. They are stored in increasing order of the context slot
+  //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount() slots
+  //    in the array.
+  // 4. ContextLocalInfoEntries:
+  //    Contains the variable modes and initialization flags corresponding to
+  //    the context locals in ContextLocalNameEntries. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount()
+  //    slots in the array.
+  // 5. FunctionNameEntryIndex:
+  //    If the scope belongs to a named function expression this part contains
+  //    information about the function variable. It always occupies two array
+  //    slots:  a. The name of the function variable.
+  //            b. The context or stack slot index for the variable.
+  int ParameterEntriesIndex();
+  int StackLocalEntriesIndex();
+  int ContextLocalNameEntriesIndex();
+  int ContextLocalInfoEntriesIndex();
+  int FunctionNameEntryIndex();
+
+  // Location of the function variable for named function expressions.
+  enum FunctionVariableInfo {
+    NONE,     // No function name present.
+    STACK,    // Function name is allocated in a stack slot.
+    CONTEXT,  // Function name is allocated in a context slot.
+    UNUSED
+  };
+
+  // Properties of scopes.
+  class TypeField:             public BitField<ScopeType,            0, 3> {};
+  class CallsEvalField:        public BitField<bool,                 3, 1> {};
+  class LanguageModeField:     public BitField<LanguageMode,         4, 2> {};
+  class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
+  class FunctionVariableMode:  public BitField<VariableMode,         8, 3> {};
+
+  // BitFields representing the encoded information for context locals in the
+  // ContextLocalInfoEntries part.
+  class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
+  class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
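+
+  // A context-local info slot packs both fields into a single Smi, e.g.
+  // (a sketch using the standard BitField encode helpers):
+  //   int value = ContextLocalMode::encode(mode) |
+  //               ContextLocalInitFlag::encode(init_flag);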
+};
+
+
 // The cache for maps used by normalized (dictionary mode) objects.
 // Such maps do not have property descriptors, so a typical program
 // needs very limited number of distinct normalized maps.
@@ -3146,11 +3296,12 @@
 };
 
 
-// ByteArray represents fixed sized byte arrays.  Used by the outside world,
-// such as PCRE, and also by the memory allocator and garbage collector to
-// fill in free blocks in the heap.
+// ByteArray represents fixed sized byte arrays.  Used for the relocation info
+// that is attached to code objects.
 class ByteArray: public FixedArrayBase {
  public:
+  inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+
   // Setter and getter.
   inline byte get(int index);
   inline void set(int index, byte value);
@@ -3207,6 +3358,41 @@
 };
 
 
+// FreeSpace represents fixed sized areas of the heap that are not currently in
+// use.  Used by the heap and GC.
+class FreeSpace: public HeapObject {
+ public:
+  // [size]: size of the free space including the header.
+  inline int size();
+  inline void set_size(int value);
+
+  inline int Size() { return size(); }
+
+  // Casting.
+  static inline FreeSpace* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void FreeSpacePrint() {
+    FreeSpacePrint(stdout);
+  }
+  void FreeSpacePrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void FreeSpaceVerify();
+#endif
+
+  // Layout description.
+  // Size is smi tagged when it is stored.
+  static const int kSizeOffset = HeapObject::kHeaderSize;
+  static const int kHeaderSize = kSizeOffset + kPointerSize;
+
+  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
+};
+
+
 // An ExternalArray represents a fixed-size array of primitive values
 // which live outside the JavaScript heap. Its subclasses are used to
 // implement the CanvasArray types being defined in the WebGL
@@ -3553,11 +3739,6 @@
   DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
   DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
 
-  // Unchecked accessor to be used during GC.
-  FixedArray* UncheckedLiteralArray() {
-    return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
-  }
-
 #undef DEFINE_ELEMENT_ACCESSORS
 
   // Accessors for elements of the ith deoptimization entry.
@@ -3699,6 +3880,9 @@
   DECL_ACCESSORS(relocation_info, ByteArray)
   void InvalidateRelocation();
 
+  // [handler_table]: Fixed array containing offsets of exception handlers.
+  DECL_ACCESSORS(handler_table, FixedArray)
+
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)
 
@@ -3742,6 +3926,11 @@
   inline int major_key();
   inline void set_major_key(int value);
 
+  // For stubs, tells whether they should always exist, so that they can be
+  // called from other stubs.
+  inline bool is_pregenerated();
+  inline void set_is_pregenerated(bool value);
+
   // [optimizable]: For FUNCTION kind, tells if it is optimizable.
   inline bool optimizable();
   inline void set_optimizable(bool value);
@@ -3756,6 +3945,11 @@
   inline bool has_debug_break_slots();
   inline void set_has_debug_break_slots(bool value);
 
+  // [compiled_optimizable]: For FUNCTION kind, tells if it has
+  // been compiled with IsOptimizing set to true.
+  inline bool is_compiled_optimizable();
+  inline void set_compiled_optimizable(bool value);
+
   // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
   // how long the function has been marked for OSR and therefore which
   // level of loop nesting we are willing to do on-stack replacement
@@ -3801,6 +3995,11 @@
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
 
+  // For kind STUB, major_key == CallFunction, tells whether there is
+  // a function cache in the instruction stream.
+  inline bool has_function_cache();
+  inline void set_has_function_cache(bool flag);
+
   // Get the safepoint entry for the given pc.
   SafepointEntry GetSafepointEntry(Address pc);
 
@@ -3905,10 +4104,6 @@
   void CodeVerify();
 #endif
 
-  // Returns the isolate/heap this code object belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -3916,8 +4111,9 @@
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
+  static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
   static const int kDeoptimizationDataOffset =
-      kRelocationInfoOffset + kPointerSize;
+      kHandlerTableOffset + kPointerSize;
   static const int kNextCodeFlushingCandidateOffset =
       kDeoptimizationDataOffset + kPointerSize;
   static const int kFlagsOffset =
@@ -3944,11 +4140,13 @@
   static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
   static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
+  static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
 
   static const int kFullCodeFlags = kOptimizableOffset + 1;
   class FullCodeFlagsHasDeoptimizationSupportField:
       public BitField<bool, 0, 1> {};  // NOLINT
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
+  class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
@@ -3963,9 +4161,10 @@
   class KindField: public BitField<Kind, 7, 4> {};
   class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
   class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
+  class IsPregeneratedField: public BitField<bool, 14, 1> {};
 
   // Signed field cannot be encoded using the BitField class.
-  static const int kArgumentsCountShift = 14;
+  static const int kArgumentsCountShift = 15;
   static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
 
   static const int kFlagsNotUsedInLookup =
@@ -4101,8 +4300,12 @@
         (bit_field2() & kElementsKindMask) >> kElementsKindShift);
   }
 
+  // Tells whether the instance has fast elements that are only Smis.
+  inline bool has_fast_smi_only_elements() {
+    return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
+  }
+
   // Tells whether the instance has fast elements.
-  // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
   inline bool has_fast_elements() {
     return elements_kind() == FAST_ELEMENTS;
   }
@@ -4111,6 +4314,10 @@
     return elements_kind() == FAST_DOUBLE_ELEMENTS;
   }
 
+  inline bool has_non_strict_arguments_elements() {
+    return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+  }
+
   inline bool has_external_array_elements() {
     ElementsKind kind(elements_kind());
     return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
@@ -4121,6 +4328,9 @@
     return elements_kind() == DICTIONARY_ELEMENTS;
   }
 
+  static bool IsValidElementsTransition(ElementsKind from_kind,
+                                        ElementsKind to_kind);
+
   // Tells whether the map is attached to SharedFunctionInfo
   // (for inobject slack tracking).
   inline void set_attached_to_shared_function_info(bool value);
@@ -4169,6 +4379,7 @@
   //    1 + 2 * i: prototype
   //    2 + 2 * i: target map
   DECL_ACCESSORS(prototype_transitions, FixedArray)
+
   inline FixedArray* unchecked_prototype_transitions();
 
   static const int kProtoTransitionHeaderSize = 1;
@@ -4178,14 +4389,14 @@
   static const int kProtoTransitionMapOffset = 1;
 
   inline int NumberOfProtoTransitions() {
-    FixedArray* cache = unchecked_prototype_transitions();
+    FixedArray* cache = prototype_transitions();
     if (cache->length() == 0) return 0;
     return
         Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
   }
 
   inline void SetNumberOfProtoTransitions(int value) {
-    FixedArray* cache = unchecked_prototype_transitions();
+    FixedArray* cache = prototype_transitions();
     ASSERT(cache->length() != 0);
     cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
                          Smi::FromInt(value));
@@ -4207,27 +4418,6 @@
   // instance descriptors.
   MUST_USE_RESULT MaybeObject* CopyDropTransitions();
 
-  // Returns this map if it already has elements that are fast, otherwise
-  // returns a copy of the map, with all transitions dropped from the
-  // descriptors and the ElementsKind set to FAST_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
-
-  // Returns this map if it already has fast elements that are doubles,
-  // otherwise returns a copy of the map, with all transitions dropped from the
-  // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
-
-  // Returns this map if already has dictionary elements, otherwise returns a
-  // copy of the map, with all transitions dropped from the descriptors and the
-  // ElementsKind set to DICTIONARY_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
-
-  // Returns a new map with all transitions dropped from the descriptors and the
-  // ElementsKind set.
-  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
-      ElementsKind elements_kind,
-      bool safe_to_add_transition);
-
   // Returns the property index for name (only valid for FAST MODE).
   int PropertyIndexFor(String* name);
 
@@ -4249,6 +4439,9 @@
   inline void ClearCodeCache(Heap* heap);
 
   // Update code cache.
+  static void UpdateCodeCache(Handle<Map> map,
+                              Handle<String> name,
+                              Handle<Code> code);
   MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
 
   // Returns the found code or undefined if absent.
@@ -4266,6 +4459,8 @@
   // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
   void CreateBackPointers();
 
+  void CreateOneBackPointer(Map* transition_target);
+
   // Set all map transitions from this map to dead maps to null.
   // Also, restore the original prototype on the targets of these
   // transitions, so that we do not process this map again while
@@ -4287,6 +4482,31 @@
     return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
   }
 
+  // Returns the contents of this map's descriptor array for the given string.
+  // May return NULL. |safe_to_add_transitions| is set to false and NULL is
+  // returned if adding transitions is not allowed.
+  Object* GetDescriptorContents(String* sentinel_name,
+                                bool* safe_to_add_transitions);
+
+  // Returns the map that this map transitions to if its elements_kind
+  // is changed to |elements_kind|, or NULL if no such map is cached yet.
+  // |safe_to_add_transition| is set to false if adding transitions is not
+  // allowed.
+  Map* LookupElementsTransitionMap(ElementsKind elements_kind,
+                                   bool* safe_to_add_transition);
+
+  // Adds an entry to this map's descriptor array for a transition to
+  // |transitioned_map| when its elements_kind is changed to |elements_kind|.
+  MaybeObject* AddElementsTransition(ElementsKind elements_kind,
+                                     Map* transitioned_map);
+
+  // Returns the transitioned map for this map with the most generic
+  // elements_kind found in |candidates|, or a null handle if no match is
+  // found.
+  Handle<Map> FindTransitionedMap(MapHandleList* candidates);
+  Map* FindTransitionedMap(MapList* candidates);
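
The two FindTransitionedMap overloads pick, among the candidate maps, the one whose elements kind is most generic. A minimal standalone sketch of that selection, assuming a hypothetical ElementsKind ordering in which larger enum values are more generic (FakeMap and FindMostGenericTransition are illustrative stand-ins, not V8 API):

    // Standalone sketch, not V8 API: pick the candidate whose elements kind
    // is most generic, assuming larger enum values are more generic.
    #include <cstdio>
    #include <vector>

    enum ElementsKind {  // hypothetical ordering, for illustration only
      FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS,
      DICTIONARY_ELEMENTS
    };

    struct FakeMap { ElementsKind kind; };

    FakeMap* FindMostGenericTransition(const std::vector<FakeMap*>& candidates) {
      FakeMap* best = NULL;
      for (std::size_t i = 0; i < candidates.size(); ++i) {
        if (best == NULL || candidates[i]->kind > best->kind) best = candidates[i];
      }
      return best;  // NULL when the candidate list is empty
    }

    int main() {
      FakeMap a = { FAST_SMI_ONLY_ELEMENTS };
      FakeMap b = { FAST_DOUBLE_ELEMENTS };
      std::vector<FakeMap*> candidates;
      candidates.push_back(&a);
      candidates.push_back(&b);
      std::printf("most generic: %d\n", FindMostGenericTransition(candidates)->kind);
      return 0;
    }
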
+
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
   inline void MapPrint() {
@@ -4302,10 +4522,6 @@
   inline int visitor_id();
   inline void set_visitor_id(int visitor_id);
 
-  // Returns the isolate/heap this map belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
   typedef void (*TraverseCallback)(Map* map, void* data);
 
   void TraverseTransitionTree(TraverseCallback callback, void* data);
@@ -4342,7 +4558,7 @@
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
-  // being continiously allocated.
+  // being contiguously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
       Map::kPrototypeTransitionsOffset + kPointerSize;
@@ -4382,7 +4598,7 @@
   static const int kStringWrapperSafeForDefaultValueOf = 2;
   static const int kAttachedToSharedFunctionInfo = 3;
   // No bits can be used after kElementsKindFirstBit, they are all reserved for
-  // storing ElementKind.  for anything other than storing the ElementKind.
+  // storing the ElementsKind.
   static const int kElementsKindShift = 4;
   static const int kElementsKindBitCount = 4;
 
@@ -4391,6 +4607,9 @@
       ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
   static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
       (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+  static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
+      static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
+                          Map::kElementsKindShift) - 1;
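
These constants pack the ElementsKind into a nibble of bit field 2, starting at kElementsKindShift. A standalone check of the arithmetic; the enum values of FAST_SMI_ONLY_ELEMENTS and FAST_ELEMENTS below are assumptions for illustration:

    // Standalone check of the bit field 2 packing arithmetic.
    #include <cstdio>

    static const int kElementsKindShift = 4;
    static const int kElementsKindBitCount = 4;
    static const int FAST_SMI_ONLY_ELEMENTS = 0;  // assumed enum value
    static const int FAST_ELEMENTS = 1;           // assumed enum value

    int main() {
      int mask = (1 << (kElementsKindShift + kElementsKindBitCount)) - 1;
      int max_fast = ((FAST_ELEMENTS + 1) << kElementsKindShift) - 1;
      int max_smi_only = ((FAST_SMI_ONLY_ELEMENTS + 1) << kElementsKindShift) - 1;
      // mask == 0xff, max_fast == 0x1f, max_smi_only == 0x0f: a bit field 2
      // value <= max_fast implies an elements kind of FAST_ELEMENTS or below.
      std::printf("mask=%#x max_fast=%#x max_smi_only=%#x\n",
                  (unsigned) mask, (unsigned) max_fast, (unsigned) max_smi_only);
      return 0;
    }
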
 
   // Bit positions for bit field 3
   static const int kIsShared = 0;
@@ -4405,6 +4624,7 @@
                               kSize> BodyDescriptor;
 
  private:
+  String* elements_transition_sentinel_name();
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
 };
 
@@ -4572,7 +4792,7 @@
   DECL_ACCESSORS(code, Code)
 
   // [scope_info]: Scope info.
-  DECL_ACCESSORS(scope_info, SerializedScopeInfo)
+  DECL_ACCESSORS(scope_info, ScopeInfo)
 
   // [construct stub]: Code stub for constructing instances of this function.
   DECL_ACCESSORS(construct_stub, Code)
@@ -4794,8 +5014,20 @@
   // spending time attempting to optimize it again.
   DECL_BOOLEAN_ACCESSORS(optimization_disabled)
 
-  // Indicates whether the function is a strict mode function.
-  DECL_BOOLEAN_ACCESSORS(strict_mode)
+  // Indicates the language mode of the function's code as defined by the
+  // current harmony drafts for the next ES language standard. Possible
+  // values are:
+  // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
+  // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
+  // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
+  inline LanguageMode language_mode();
+  inline void set_language_mode(LanguageMode language_mode);
+
+  // Indicates whether the language mode of this function is CLASSIC_MODE.
+  inline bool is_classic_mode();
+
+  // Indicates whether the language mode of this function is EXTENDED_MODE.
+  inline bool is_extended_mode();
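
The three modes form a hierarchy: extended mode includes the strict-mode restrictions, and both differ from classic mode. A minimal sketch of the corresponding predicates (free functions standing in for the accessors declared above):

    // Minimal sketch of the language mode hierarchy (free functions stand in
    // for the SharedFunctionInfo accessors).
    #include <cassert>

    enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };

    inline bool is_classic_mode(LanguageMode mode) { return mode == CLASSIC_MODE; }
    inline bool is_extended_mode(LanguageMode mode) { return mode == EXTENDED_MODE; }
    // Extended mode implies the strict mode restrictions as well.
    inline bool has_strict_semantics(LanguageMode mode) {
      return mode == STRICT_MODE || mode == EXTENDED_MODE;
    }

    int main() {
      assert(is_classic_mode(CLASSIC_MODE));
      assert(has_strict_semantics(EXTENDED_MODE) && is_extended_mode(EXTENDED_MODE));
      assert(has_strict_semantics(STRICT_MODE) && !is_extended_mode(STRICT_MODE));
      return 0;
    }
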
 
   // False if the function definitely does not allocate an arguments object.
   DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -4887,6 +5119,13 @@
   void SharedFunctionInfoVerify();
 #endif
 
+  // Helpers to compile the shared code.  Each returns true on success,
+  // false on failure (e.g., stack overflow during compilation).
+  static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+                             ClearExceptionFlag flag);
+  static bool CompileLazy(Handle<SharedFunctionInfo> shared,
+                          ClearExceptionFlag flag);
+
   // Casting.
   static inline SharedFunctionInfo* cast(Object* obj);
 
@@ -5011,6 +5250,7 @@
     kCodeAgeShift,
     kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
     kStrictModeFunction,
+    kExtendedModeFunction,
     kUsesArguments,
     kHasDuplicateParameters,
     kNative,
@@ -5037,22 +5277,30 @@
  public:
   // Constants for optimizing codegen for strict mode function and
   // native tests.
-  // Allows to use byte-widgh instructions.
+  // Allows the use of byte-width instructions.
   static const int kStrictModeBitWithinByte =
       (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
+  static const int kExtendedModeBitWithinByte =
+      (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
   static const int kNativeBitWithinByte =
       (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+  static const int kExtendedModeByteOffset = kCompilerHintsOffset +
+      (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
 #elif __BYTE_ORDER == __BIG_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+  static const int kExtendedModeByteOffset = kCompilerHintsOffset +
+      (kCompilerHintsSize - 1) -
+      ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
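
The point of these offsets is that a single compiler-hint bit can be tested with a byte-width load at a byte offset that depends on endianness. A standalone sketch of the arithmetic; the constant values below (base offset, field size, smi tag size, bit index) are placeholders, not the real V8 layout:

    // Standalone sketch of the byte/bit offset arithmetic; all constant
    // values here are placeholders.
    #include <cstdio>

    static const int kBitsPerByte = 8;
    static const int kCompilerHintsOffset = 0;      // placeholder base offset
    static const int kCompilerHintsSize = 4;        // assumed 32-bit hints field
    static const int kCompilerHintsSmiTagSize = 1;  // assumed smi tag width

    // Computes the byte offset and bit-within-byte for the flag at bit |bit|.
    void OffsetsFor(int bit, bool little_endian, int* byte_offset, int* bit_in_byte) {
      int shifted = bit + kCompilerHintsSmiTagSize;
      *bit_in_byte = shifted % kBitsPerByte;
      *byte_offset = little_endian
          ? kCompilerHintsOffset + shifted / kBitsPerByte
          : kCompilerHintsOffset + (kCompilerHintsSize - 1) - shifted / kBitsPerByte;
    }

    int main() {
      const int kStrictModeFunction = 7;  // hypothetical bit index
      int byte_offset, bit_in_byte;
      OffsetsFor(kStrictModeFunction, true, &byte_offset, &bit_in_byte);
      std::printf("LE: byte %d, bit %d\n", byte_offset, bit_in_byte);
      OffsetsFor(kStrictModeFunction, false, &byte_offset, &bit_in_byte);
      std::printf("BE: byte %d, bit %d\n", byte_offset, bit_in_byte);
      return 0;
    }
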
@@ -5108,6 +5356,14 @@
   // recompiled the next time it is executed.
   void MarkForLazyRecompilation();
 
+  // Helpers to compile this function.  Each returns true on success,
+  // false on failure (e.g., stack overflow during compilation).
+  static bool CompileLazy(Handle<JSFunction> function,
+                          ClearExceptionFlag flag);
+  static bool CompileOptimized(Handle<JSFunction> function,
+                               int osr_ast_id,
+                               ClearExceptionFlag flag);
+
   // Tells whether or not the function is already marked for lazy
   // recompilation.
   inline bool IsMarkedForLazyRecompilation();
@@ -5115,7 +5371,8 @@
   // Check whether or not this function is inlineable.
   bool IsInlineable();
 
-  // [literals]: Fixed array holding the materialized literals.
+  // [literals_or_bindings]: Fixed array holding either
+  // the materialized literals or the bindings of a bound function.
   //
   // If the function contains object, regexp or array literals, the
   // literals array prefix contains the object, regexp, and array
@@ -5124,7 +5381,17 @@
   // or array functions.  Performing a dynamic lookup, we might end up
   // using the functions from a new context that we should not have
   // access to.
-  DECL_ACCESSORS(literals, FixedArray)
+  //
+  // On bound functions, the array is a (copy-on-write) fixed-array containing
+  // the function that was bound, bound this-value and any bound
+  // arguments. Bound functions never contain literals.
+  DECL_ACCESSORS(literals_or_bindings, FixedArray)
+
+  inline FixedArray* literals();
+  inline void set_literals(FixedArray* literals);
+
+  inline FixedArray* function_bindings();
+  inline void set_function_bindings(FixedArray* bindings);
 
   // The initial map for an object created by this constructor.
   inline Map* initial_map();
@@ -5212,6 +5479,11 @@
   static const int kLiteralsPrefixSize = 1;
   static const int kLiteralGlobalContextIndex = 0;
 
+  // Layout of the bound-function binding array.
+  static const int kBoundFunctionIndex = 0;
+  static const int kBoundThisIndex = 1;
+  static const int kBoundArgumentsStartIndex = 2;
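
Concretely, a bound function's binding array is laid out as [target function, bound this, bound arguments...]. A standalone sketch reading such an array with the indices above (std::string stands in for tagged heap values):

    // Standalone sketch of the binding array layout.
    #include <cstdio>
    #include <string>
    #include <vector>

    static const int kBoundFunctionIndex = 0;
    static const int kBoundThisIndex = 1;
    static const int kBoundArgumentsStartIndex = 2;

    int main() {
      // f.bind(receiver, "a", "b") would conceptually record:
      std::vector<std::string> bindings;
      bindings.push_back("<target function f>");
      bindings.push_back("<bound this: receiver>");
      bindings.push_back("a");
      bindings.push_back("b");

      std::printf("target: %s\n", bindings[kBoundFunctionIndex].c_str());
      std::printf("this:   %s\n", bindings[kBoundThisIndex].c_str());
      for (std::size_t i = kBoundArgumentsStartIndex; i < bindings.size(); ++i) {
        std::printf("arg %d:  %s\n",
                    static_cast<int>(i - kBoundArgumentsStartIndex),
                    bindings[i].c_str());
      }
      return 0;
    }
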
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
 };
@@ -5284,6 +5556,11 @@
   }
 
   // Ensure that the global object has a cell for the given property name.
+  static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
+      Handle<GlobalObject> global,
+      Handle<String> name);
+  // TODO(kmillikin): This function can be eliminated once the stub cache is
+  // fully handlified (and the static helper can be written directly).
   MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
 
   // Casting.
@@ -5296,8 +5573,6 @@
   static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
 
  private:
-  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
 };
 
@@ -5612,7 +5887,7 @@
 };
 
 
-class CompilationCacheShape : public BaseShape<HashTableKey*> {
+class CompilationCacheShape {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -5640,12 +5915,16 @@
  public:
   // Find cached value for a string key, otherwise return null.
   Object* Lookup(String* src);
-  Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
+  Object* LookupEval(String* src,
+                     Context* context,
+                     LanguageMode language_mode,
+                     int scope_position);
   Object* LookupRegExp(String* source, JSRegExp::Flags flags);
   MaybeObject* Put(String* src, Object* value);
   MaybeObject* PutEval(String* src,
                        Context* context,
-                       SharedFunctionInfo* value);
+                       SharedFunctionInfo* value,
+                       int scope_position);
   MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
 
   // Remove given value from cache.
@@ -5712,7 +5991,7 @@
 };
 
 
-class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
+class CodeCacheHashTableShape {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
     return key->IsMatch(value);
@@ -5758,10 +6037,17 @@
  public:
   DECL_ACCESSORS(cache, Object)
 
-  MUST_USE_RESULT MaybeObject* Update(MapList* maps,
+  static void Update(Handle<PolymorphicCodeCache> cache,
+                     MapHandleList* maps,
+                     Code::Flags flags,
+                     Handle<Code> code);
+
+  MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps,
                                       Code::Flags flags,
                                       Code* code);
-  Object* Lookup(MapList* maps, Code::Flags flags);
+
+  // Returns an undefined value if the entry is not found.
+  Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
 
   static inline PolymorphicCodeCache* cast(Object* obj);
 
@@ -5786,8 +6072,11 @@
 class PolymorphicCodeCacheHashTable
     : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
  public:
-  Object* Lookup(MapList* maps, int code_kind);
-  MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
+  Object* Lookup(MapHandleList* maps, int code_kind);
+
+  MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps,
+                                   int code_kind,
+                                   Code* code);
 
   static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
 
@@ -5803,7 +6092,7 @@
 
 class StringHasher {
  public:
-  explicit inline StringHasher(int length, uint32_t seed);
+  explicit inline StringHasher(int length);
 
   // Returns true if the hash of this string can be computed without
   // looking at the contents.
@@ -5834,11 +6123,6 @@
   // value is the represented decimal value.
   static uint32_t MakeArrayIndexHash(uint32_t value, int length);
 
-  // No string is allowed to have a hash of zero.  That value is reserved
-  // for internal properties.  If the hash calculation yields zero then we
-  // use 27 instead.
-  static const int kZeroHash = 27;
-
  private:
   uint32_t array_index() {
     ASSERT(is_array_index());
@@ -5859,9 +6143,7 @@
 
 // Calculates string hash.
 template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars,
-                                     int length,
-                                     uint32_t seed);
+inline uint32_t HashSequentialString(const schar* chars, int length);
 
 
 // The characteristics of a string are stored in its map.  Retrieving these
@@ -6065,7 +6347,8 @@
       RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
       int* length_output = 0);
 
-  int Utf8Length();
+  inline int Utf8Length() { return Utf8Length(this, 0, length()); }
+  static int Utf8Length(String* input, int from, int to);
 
   // Return a 16 bit Unicode representation of the string.
   // The string should be nearly flat, otherwise the performance of
@@ -6083,8 +6366,7 @@
   inline uint32_t Hash();
 
   static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
-                                   int length,
-                                   uint32_t seed);
+                                   int length);
 
   static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
                                 uint32_t* index,
@@ -6149,10 +6431,6 @@
   // Shift constant retrieving hash code from hash field.
   static const int kHashShift = kNofHashBitFields;
 
-  // Only these bits are relevant in the hash, since the top two are shifted
-  // out.
-  static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
-
   // Array index strings this short can keep their index in the hash
   // field.
   static const int kMaxCachedArrayIndexLength = 7;
@@ -6307,6 +6585,9 @@
   // Casting.
   static inline SeqString* cast(Object* obj);
 
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
 };
@@ -6340,12 +6621,8 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
   }
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
   // Maximal memory usage for a single sequential ASCII string.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 512 * MB - 1;
   // Maximal length of a single sequential ASCII string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize);
@@ -6394,12 +6671,8 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
   }
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
   // Maximal memory usage for a single sequential two-byte string.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 512 * MB - 1;
   // Maximal length of a single sequential two-byte string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
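
With kMaxSize now strictly less than 512 MB, the maximal lengths follow directly from the header size. A quick standalone check of the arithmetic; the kHeaderSize value below is an assumed placeholder, not the real String::kSize:

    // Quick standalone check of the size/length arithmetic.
    #include <cstdio>
    #include <stdint.h>

    static const int MB = 1024 * 1024;
    static const int kHeaderSize = 12;          // assumed header size
    static const int kMaxSize = 512 * MB - 1;   // strictly below 512 MB

    int main() {
      int max_ascii_length = kMaxSize - kHeaderSize;
      int max_two_byte_length = (kMaxSize - kHeaderSize) /
                                static_cast<int>(sizeof(uint16_t));
      std::printf("ascii: %d chars, two-byte: %d chars\n",
                  max_ascii_length, max_two_byte_length);
      return 0;
    }
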
@@ -6543,7 +6816,12 @@
 
   // Layout description.
   static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
-  static const int kSize = kResourceOffset + kPointerSize;
+  static const int kShortSize = kResourceOffset + kPointerSize;
+  static const int kResourceDataOffset = kResourceOffset + kPointerSize;
+  static const int kSize = kResourceDataOffset + kPointerSize;
+
+  // Returns whether the external string is short (data pointer is not cached).
+  inline bool is_short();
 
   STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
 
@@ -6561,11 +6839,19 @@
   typedef v8::String::ExternalAsciiStringResource Resource;
 
   // The underlying resource.
-  inline Resource* resource();
-  inline void set_resource(Resource* buffer);
+  inline const Resource* resource();
+  inline void set_resource(const Resource* buffer);
+
+  // Update the pointer cache to the external character array.
+  // The cached pointer is always valid, as the external character array does
+  // not move during its lifetime.  Deserialization is the only exception,
+  // after which the pointer cache has to be refreshed.
+  inline void update_data_cache();
+
+  inline const char* GetChars();
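
A minimal stand-in for this caching scheme: keep the resource pointer plus a cached copy of its data pointer, refresh the cache on demand, and serve character reads from the cache (plain C++ types, not the V8 ones):

    // Minimal stand-in for the cached data pointer of an external string.
    #include <cstdio>

    struct AsciiResource {   // stands in for String::ExternalAsciiStringResource
      const char* data;
    };

    class CachedExternalString {
     public:
      explicit CachedExternalString(const AsciiResource* resource)
          : resource_(resource) { update_data_cache(); }

      // Refreshes the cached pointer, e.g. after deserialization.
      void update_data_cache() { cached_data_ = resource_->data; }

      // Character reads go through the cache instead of the resource.
      char Get(int index) const { return cached_data_[index]; }

     private:
      const AsciiResource* resource_;
      const char* cached_data_;
    };

    int main() {
      AsciiResource r = { "hello" };
      CachedExternalString s(&r);
      std::printf("%c\n", s.Get(1));  // prints 'e'
      return 0;
    }
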
 
   // Dispatched behavior.
-  uint16_t ExternalAsciiStringGet(int index);
+  inline uint16_t ExternalAsciiStringGet(int index);
 
   // Casting.
   static inline ExternalAsciiString* cast(Object* obj);
@@ -6598,14 +6884,22 @@
   typedef v8::String::ExternalStringResource Resource;
 
   // The underlying string resource.
-  inline Resource* resource();
-  inline void set_resource(Resource* buffer);
+  inline const Resource* resource();
+  inline void set_resource(const Resource* buffer);
+
+  // Update the pointer cache to the external character array.
+  // The cached pointer is always valid, as the external character array does
+  // not move during its lifetime.  Deserialization is the only exception,
+  // after which the pointer cache has to be refreshed.
+  inline void update_data_cache();
+
+  inline const uint16_t* GetChars();
 
   // Dispatched behavior.
-  uint16_t ExternalTwoByteStringGet(int index);
+  inline uint16_t ExternalTwoByteStringGet(int index);
 
   // For regexp code.
-  const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+  inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
 
   // Casting.
   static inline ExternalTwoByteString* cast(Object* obj);
@@ -6750,6 +7044,9 @@
   static const byte kUndefined = 5;
   static const byte kOther = 6;
 
+  // The ToNumber value of a hidden oddball is a negative smi.
+  static const int kLeastHiddenOddballNumber = -5;
+
   typedef FixedBodyDescriptor<kToStringOffset,
                               kToNumberOffset + kPointerSize,
                               kSize> BodyDescriptor;
@@ -6785,10 +7082,6 @@
                               kValueOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
-  // Returns the isolate/heap this cell object belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
 };
@@ -6800,25 +7093,56 @@
   // [handler]: The handler property.
   DECL_ACCESSORS(handler, Object)
 
+  // [hash]: The hash code property (undefined if not initialized yet).
+  DECL_ACCESSORS(hash, Object)
+
   // Casting.
   static inline JSProxy* cast(Object* obj);
 
   bool HasPropertyWithHandler(String* name);
+  bool HasElementWithHandler(uint32_t index);
+
+  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
+      Object* receiver,
+      String* name);
+  MUST_USE_RESULT MaybeObject* GetElementWithHandler(
+      Object* receiver,
+      uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
       String* name,
       Object* value,
       PropertyAttributes attributes,
       StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetElementWithHandler(
+      uint32_t index,
+      Object* value,
+      StrictModeFlag strict_mode);
+
+  // If the handler defines an accessor property, invoke its setter
+  // (or throw if only a getter exists) and set *found to true. Otherwise
+  // *found is set to false.
+  MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool* found);
 
   MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
       String* name,
       DeleteMode mode);
+  MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
+      uint32_t index,
+      DeleteMode mode);
 
   MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
       JSReceiver* receiver,
-      String* name,
-      bool* has_exception);
+      String* name);
+  MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
+      JSReceiver* receiver,
+      uint32_t index);
+
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
 
   // Turn this into an (empty) JSObject.
   void Fix();
@@ -6826,6 +7150,13 @@
   // Initializes the body after the handler slot.
   inline void InitializeBody(int object_size, Object* value);
 
+  // Invoke a trap by name. If the trap does not exist on this proxy's
+  // handler but derived_trap is non-NULL, invoke that instead.  May cause GC.
+  Handle<Object> CallTrap(const char* name,
+                          Handle<Object> derived_trap,
+                          int argc,
+                          Handle<Object> args[]);
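
A standalone sketch of the fallback contract described above: look up the named trap on the handler, and fall back to the supplied derived trap when the handler does not define one (a std::map of function pointers stands in for the handler object):

    // Standalone sketch of trap lookup with a derived-trap fallback.
    #include <cstdio>
    #include <map>
    #include <string>

    typedef const char* (*Trap)();

    const char* DerivedGet() { return "derived get"; }
    const char* GetPropertyDescriptor() { return "fundamental descriptor"; }

    // Mirrors the CallTrap contract: use the named trap when the handler
    // defines one, otherwise fall back to |derived_trap| if it is non-NULL.
    Trap LookupTrap(const std::map<std::string, Trap>& handler,
                    const std::string& name, Trap derived_trap) {
      std::map<std::string, Trap>::const_iterator it = handler.find(name);
      if (it != handler.end()) return it->second;
      return derived_trap;  // NULL here means the caller must throw
    }

    int main() {
      std::map<std::string, Trap> handler;
      handler["getPropertyDescriptor"] = GetPropertyDescriptor;
      Trap trap = LookupTrap(handler, "get", DerivedGet);
      std::printf("%s\n", trap());  // falls back to the derived trap
      return 0;
    }
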
+
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
   inline void JSProxyPrint() {
@@ -6841,7 +7172,8 @@
   // size as a virgin JSObject. This is essential for becoming a JSObject
   // upon freeze.
   static const int kHandlerOffset = HeapObject::kHeaderSize;
-  static const int kPaddingOffset = kHandlerOffset + kPointerSize;
+  static const int kHashOffset = kHandlerOffset + kPointerSize;
+  static const int kPaddingOffset = kHashOffset + kPointerSize;
   static const int kSize = JSObject::kHeaderSize;
   static const int kHeaderSize = kPaddingOffset;
   static const int kPaddingSize = kSize - kPaddingOffset;
@@ -6849,7 +7181,7 @@
   STATIC_CHECK(kPaddingSize >= 0);
 
   typedef FixedBodyDescriptor<kHandlerOffset,
-                              kHandlerOffset + kPointerSize,
+                              kPaddingOffset,
                               kSize> BodyDescriptor;
 
  private:
@@ -6880,7 +7212,7 @@
 #endif
 
   // Layout description.
-  static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
+  static const int kCallTrapOffset = JSProxy::kPaddingOffset;
   static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
   static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
   static const int kSize = JSFunction::kSize;
@@ -6897,18 +7229,69 @@
 };
 
 
+// The JSSet describes EcmaScript Harmony sets
+class JSSet: public JSObject {
+ public:
+  // [table]: the backing hash set containing keys.
+  DECL_ACCESSORS(table, Object)
+
+  // Casting.
+  static inline JSSet* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void JSSetPrint() {
+    JSSetPrint(stdout);
+  }
+  void JSSetPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSSetVerify();
+#endif
+
+  static const int kTableOffset = JSObject::kHeaderSize;
+  static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
+};
+
+
+// The JSMap describes EcmaScript Harmony maps
+class JSMap: public JSObject {
+ public:
+  // [table]: the backing hash table mapping keys to values.
+  DECL_ACCESSORS(table, Object)
+
+  // Casting.
+  static inline JSMap* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void JSMapPrint() {
+    JSMapPrint(stdout);
+  }
+  void JSMapPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSMapVerify();
+#endif
+
+  static const int kTableOffset = JSObject::kHeaderSize;
+  static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
+};
+
+
 // The JSWeakMap describes EcmaScript Harmony weak maps
 class JSWeakMap: public JSObject {
  public:
   // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, ObjectHashTable)
+  DECL_ACCESSORS(table, Object)
 
   // [next]: linked list of encountered weak maps during GC.
   DECL_ACCESSORS(next, Object)
 
-  // Unchecked accessors to be used during GC.
-  inline ObjectHashTable* unchecked_table();
-
   // Casting.
   static inline JSWeakMap* cast(Object* obj);
 
@@ -6937,8 +7320,8 @@
 class Foreign: public HeapObject {
  public:
   // [address]: field containing the address.
-  inline Address address();
-  inline void set_address(Address value);
+  inline Address foreign_address();
+  inline void set_foreign_address(Address value);
 
   // Casting.
   static inline Foreign* cast(Object* obj);
@@ -6961,10 +7344,10 @@
 
   // Layout description.
 
-  static const int kAddressOffset = HeapObject::kHeaderSize;
-  static const int kSize = kAddressOffset + kPointerSize;
+  static const int kForeignAddressOffset = HeapObject::kHeaderSize;
+  static const int kSize = kForeignAddressOffset + kPointerSize;
 
-  STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
+  STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
@@ -6994,7 +7377,7 @@
   MUST_USE_RESULT MaybeObject* Initialize(int capacity);
 
   // Set the content of the array to the content of storage.
-  inline void SetContent(FixedArray* storage);
+  inline MaybeObject* SetContent(FixedArray* storage);
 
   // Casting.
   static inline JSArray* cast(Object* obj);
@@ -7210,7 +7593,6 @@
   static const int kPropertyListOffset = kTagOffset + kPointerSize;
   static const int kHeaderSize         = kPropertyListOffset + kPointerSize;
  protected:
-  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
   DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
 };
 
@@ -7514,11 +7896,16 @@
   // Handy shorthand for visiting a single pointer.
   virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
 
+  // Visits a pointer embedded in a code object.
+  virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+
   // Visits a contiguous array of external references (references to the C++
   // heap) in the half-open range [start, end). Any or all of the values
   // may be modified on return.
   virtual void VisitExternalReferences(Address* start, Address* end) {}
 
+  virtual void VisitExternalReference(RelocInfo* rinfo);
+
   inline void VisitExternalReference(Address* p) {
     VisitExternalReferences(p, p + 1);
   }
diff --git a/src/parser.cc b/src/parser.cc
index 90d5c91..51036c9 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -28,7 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
-#include "ast-inl.h"
+#include "ast.h"
 #include "bootstrapper.h"
 #include "char-predicates-inl.h"
 #include "codegen.h"
@@ -407,9 +407,9 @@
 }
 
 
-Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
+Scope* Parser::NewScope(Scope* parent, ScopeType type) {
   Scope* result = new(zone()) Scope(parent, type);
-  result->Initialize(inside_with);
+  result->Initialize();
   return result;
 }
 
@@ -459,26 +459,42 @@
 
 
 // ----------------------------------------------------------------------------
-// LexicalScope is a support class to facilitate manipulation of the
-// Parser's scope stack. The constructor sets the parser's top scope
-// to the incoming scope, and the destructor resets it.
-//
-// Additionally, it stores transient information used during parsing.
-// These scopes are not kept around after parsing or referenced by syntax
-// trees so they can be stack-allocated and hence used by the pre-parser.
+// FunctionState and BlockState together implement the parser's scope stack.
+// The parser's current scope is in top_scope_.  The BlockState and
+// FunctionState constructors push on the scope stack and the destructors
+// pop.  They are also used to hold the parser's per-function and per-block
+// state.
 
-class LexicalScope BASE_EMBEDDED {
+class Parser::BlockState BASE_EMBEDDED {
  public:
-  LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
-  ~LexicalScope();
+  BlockState(Parser* parser, Scope* scope)
+      : parser_(parser),
+        outer_scope_(parser->top_scope_) {
+    parser->top_scope_ = scope;
+  }
+
+  ~BlockState() { parser_->top_scope_ = outer_scope_; }
+
+ private:
+  Parser* parser_;
+  Scope* outer_scope_;
+};
+
+
+class Parser::FunctionState BASE_EMBEDDED {
+ public:
+  FunctionState(Parser* parser, Scope* scope, Isolate* isolate);
+  ~FunctionState();
 
   int NextMaterializedLiteralIndex() {
-    int next_index =
-        materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
-    materialized_literal_count_++;
-    return next_index;
+    return next_materialized_literal_index_++;
   }
-  int materialized_literal_count() { return materialized_literal_count_; }
+  int materialized_literal_count() {
+    return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+  }
+
+  int NextHandlerIndex() { return next_handler_index_++; }
+  int handler_count() { return next_handler_index_; }
 
   void SetThisPropertyAssignmentInfo(
       bool only_simple_this_property_assignments,
@@ -498,10 +514,13 @@
   int expected_property_count() { return expected_property_count_; }
 
  private:
-  // Captures the number of literals that need materialization in the
-  // function.  Includes regexp literals, and boilerplate for object
-  // and array literals.
-  int materialized_literal_count_;
+  // Used to assign an index to each literal that needs materialization in
+  // the function.  Includes regexp literals, and boilerplate for object and
+  // array literals.
+  int next_materialized_literal_index_;
+
+  // Used to assign a per-function index to try/catch handlers.
+  int next_handler_index_;
 
   // Properties count estimation.
   int expected_property_count_;
@@ -511,38 +530,35 @@
   bool only_simple_this_property_assignments_;
   Handle<FixedArray> this_property_assignments_;
 
-  // Bookkeeping
   Parser* parser_;
-  // Previous values
-  LexicalScope* lexical_scope_parent_;
-  Scope* previous_scope_;
-  int previous_with_nesting_level_;
-  unsigned previous_ast_node_id_;
+  FunctionState* outer_function_state_;
+  Scope* outer_scope_;
+  unsigned saved_ast_node_id_;
 };
 
 
-LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
-  : materialized_literal_count_(0),
-    expected_property_count_(0),
-    only_simple_this_property_assignments_(false),
-    this_property_assignments_(isolate->factory()->empty_fixed_array()),
-    parser_(parser),
-    lexical_scope_parent_(parser->lexical_scope_),
-    previous_scope_(parser->top_scope_),
-    previous_with_nesting_level_(parser->with_nesting_level_),
-    previous_ast_node_id_(isolate->ast_node_id()) {
+Parser::FunctionState::FunctionState(Parser* parser,
+                                     Scope* scope,
+                                     Isolate* isolate)
+    : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+      next_handler_index_(0),
+      expected_property_count_(0),
+      only_simple_this_property_assignments_(false),
+      this_property_assignments_(isolate->factory()->empty_fixed_array()),
+      parser_(parser),
+      outer_function_state_(parser->current_function_state_),
+      outer_scope_(parser->top_scope_),
+      saved_ast_node_id_(isolate->ast_node_id()) {
   parser->top_scope_ = scope;
-  parser->lexical_scope_ = this;
-  parser->with_nesting_level_ = 0;
+  parser->current_function_state_ = this;
   isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
 }
 
 
-LexicalScope::~LexicalScope() {
-  parser_->top_scope_ = previous_scope_;
-  parser_->lexical_scope_ = lexical_scope_parent_;
-  parser_->with_nesting_level_ = previous_with_nesting_level_;
-  parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
+Parser::FunctionState::~FunctionState() {
+  parser_->top_scope_ = outer_scope_;
+  parser_->current_function_state_ = outer_function_state_;
+  parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
 }
 
 
@@ -570,34 +586,36 @@
 // Implementation of Parser
 
 Parser::Parser(Handle<Script> script,
-               bool allow_natives_syntax,
+               int parser_flags,
                v8::Extension* extension,
                ScriptDataImpl* pre_data)
     : isolate_(script->GetIsolate()),
       symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
       script_(script),
       scanner_(isolate_->unicode_cache()),
+      reusable_preparser_(NULL),
       top_scope_(NULL),
-      with_nesting_level_(0),
-      lexical_scope_(NULL),
+      current_function_state_(NULL),
       target_stack_(NULL),
-      allow_natives_syntax_(allow_natives_syntax),
       extension_(extension),
       pre_data_(pre_data),
       fni_(NULL),
+      allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
+      allow_lazy_((parser_flags & kAllowLazy) != 0),
       stack_overflow_(false),
-      parenthesized_function_(false),
-      harmony_block_scoping_(false) {
+      parenthesized_function_(false) {
   AstNode::ResetIds();
+  if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
+    scanner().SetHarmonyScoping(true);
+  }
 }
 
 
-FunctionLiteral* Parser::ParseProgram(Handle<String> source,
-                                      bool in_global_context,
-                                      StrictModeFlag strict_mode) {
+FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
   ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(isolate()->counters()->parse());
+  Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
   fni_ = new(zone()) FuncNameInferrer(isolate());
 
@@ -610,47 +628,48 @@
     ExternalTwoByteStringUC16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
+    return DoParseProgram(info, source, &zone_scope);
   } else {
     GenericStringUC16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
+    return DoParseProgram(info, source, &zone_scope);
   }
 }
 
 
-FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
-                                        bool in_global_context,
-                                        StrictModeFlag strict_mode,
+FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
+                                        Handle<String> source,
                                         ZoneScope* zone_scope) {
+  ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
   if (pre_data_ != NULL) pre_data_->Initialize();
 
   // Compute the parsing mode.
-  mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+  mode_ = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
   if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
 
-  Scope::Type type =
-    in_global_context
-      ? Scope::GLOBAL_SCOPE
-      : Scope::EVAL_SCOPE;
   Handle<String> no_name = isolate()->factory()->empty_symbol();
 
   FunctionLiteral* result = NULL;
-  { Scope* scope = NewScope(top_scope_, type, inside_with());
-    LexicalScope lexical_scope(this, scope, isolate());
-    if (strict_mode == kStrictMode) {
-      top_scope_->EnableStrictMode();
+  { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+    info->SetGlobalScope(scope);
+    if (!info->is_global()) {
+      scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
+      scope = NewScope(scope, EVAL_SCOPE);
     }
+    scope->set_start_position(0);
+    scope->set_end_position(source->length());
+    FunctionState function_state(this, scope, isolate());
+    top_scope_->SetLanguageMode(info->language_mode());
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
     ParseSourceElements(body, Token::EOS, &ok);
-    if (ok && top_scope_->is_strict_mode()) {
+    if (ok && !top_scope_->is_classic_mode()) {
       CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
     }
 
-    if (ok && harmony_block_scoping_) {
+    if (ok && is_extended_mode()) {
       CheckConflictingVarDeclarations(scope, &ok);
     }
 
@@ -660,13 +679,12 @@
           no_name,
           top_scope_,
           body,
-          lexical_scope.materialized_literal_count(),
-          lexical_scope.expected_property_count(),
-          lexical_scope.only_simple_this_property_assignments(),
-          lexical_scope.this_property_assignments(),
+          function_state.materialized_literal_count(),
+          function_state.expected_property_count(),
+          function_state.handler_count(),
+          function_state.only_simple_this_property_assignments(),
+          function_state.this_property_assignments(),
           0,
-          0,
-          source->length(),
           FunctionLiteral::ANONYMOUS_EXPRESSION,
           false);  // Does not have duplicate parameters.
     } else if (stack_overflow_) {
@@ -714,6 +732,7 @@
                                    ZoneScope* zone_scope) {
   Handle<SharedFunctionInfo> shared_info = info->shared_info();
   scanner_.Initialize(source);
+  ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
@@ -727,16 +746,17 @@
 
   {
     // Parse the function literal.
-    Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
+    Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+    info->SetGlobalScope(scope);
     if (!info->closure().is_null()) {
-      scope = Scope::DeserializeScopeChain(info, scope);
+      scope = Scope::DeserializeScopeChain(info->closure()->context(), scope);
     }
-    LexicalScope lexical_scope(this, scope, isolate());
-
-    if (shared_info->strict_mode()) {
-      top_scope_->EnableStrictMode();
-    }
-
+    FunctionState function_state(this, scope, isolate());
+    ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
+    ASSERT(scope->language_mode() != EXTENDED_MODE ||
+           info->is_extended_mode());
+    ASSERT(info->language_mode() == shared_info->language_mode());
+    scope->SetLanguageMode(shared_info->language_mode());
     FunctionLiteral::Type type = shared_info->is_expression()
         ? (shared_info->is_anonymous()
               ? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -817,10 +837,6 @@
   isolate()->Throw(*result, &location);
 }
 
-void Parser::SetHarmonyBlockScoping(bool block_scoping) {
-  scanner().SetHarmonyBlockScoping(block_scoping);
-  harmony_block_scoping_ = block_scoping;
-}
 
 // Base class containing common code for the different finder classes used by
 // the parser.
@@ -957,17 +973,18 @@
 };
 
 
-// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
+// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
 // this.x = ...;, where x is a named property. It also determines whether a
 // function contains only assignments of this type.
-class ThisNamedPropertyAssigmentFinder : public ParserFinder {
+class ThisNamedPropertyAssignmentFinder : public ParserFinder {
  public:
-  explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
+  explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
       : isolate_(isolate),
         only_simple_this_property_assignments_(true),
-        names_(NULL),
-        assigned_arguments_(NULL),
-        assigned_constants_(NULL) {}
+        names_(0),
+        assigned_arguments_(0),
+        assigned_constants_(0) {
+  }
 
   void Update(Scope* scope, Statement* stat) {
     // Bail out if function already has property assignment that are
@@ -994,19 +1011,17 @@
   // Returns a fixed array containing three elements for each assignment of the
   // form this.x = y;
   Handle<FixedArray> GetThisPropertyAssignments() {
-    if (names_ == NULL) {
+    if (names_.is_empty()) {
       return isolate_->factory()->empty_fixed_array();
     }
-    ASSERT(names_ != NULL);
-    ASSERT(assigned_arguments_ != NULL);
-    ASSERT_EQ(names_->length(), assigned_arguments_->length());
-    ASSERT_EQ(names_->length(), assigned_constants_->length());
+    ASSERT_EQ(names_.length(), assigned_arguments_.length());
+    ASSERT_EQ(names_.length(), assigned_constants_.length());
     Handle<FixedArray> assignments =
-        isolate_->factory()->NewFixedArray(names_->length() * 3);
-    for (int i = 0; i < names_->length(); i++) {
-      assignments->set(i * 3, *names_->at(i));
-      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
-      assignments->set(i * 3 + 2, *assigned_constants_->at(i));
+        isolate_->factory()->NewFixedArray(names_.length() * 3);
+    for (int i = 0; i < names_.length(); ++i) {
+      assignments->set(i * 3, *names_[i]);
+      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
+      assignments->set(i * 3 + 2, *assigned_constants_[i]);
     }
     return assignments;
   }
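
Each recorded assignment thus occupies three consecutive slots: the property name, the parameter index (or -1 for a constant), and the constant value (or undefined). A standalone sketch of that flattening, with std::string standing in for heap values:

    // Standalone sketch of the three-slots-per-assignment encoding.
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Assignment {
      std::string name;      // property name
      int argument_index;    // parameter index, or -1 for a constant
      std::string constant;  // constant value, or "undefined"
    };

    int main() {
      std::vector<Assignment> assignments;
      Assignment a1 = { "x", 0, "undefined" };  // this.x = <parameter 0>;
      Assignment a2 = { "y", -1, "42" };        // this.y = 42;
      assignments.push_back(a1);
      assignments.push_back(a2);

      // Flatten into the triple layout built by GetThisPropertyAssignments.
      std::vector<std::string> flat(assignments.size() * 3);
      for (std::size_t i = 0; i < assignments.size(); ++i) {
        char buf[16];
        std::sprintf(buf, "%d", assignments[i].argument_index);
        flat[i * 3] = assignments[i].name;
        flat[i * 3 + 1] = buf;
        flat[i * 3 + 2] = assignments[i].constant;
      }
      for (std::size_t i = 0; i < flat.size(); ++i) {
        std::printf("%s ", flat[i].c_str());
      }
      std::printf("\n");
      return 0;
    }
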
@@ -1063,18 +1078,37 @@
     AssignmentFromSomethingElse();
   }
 
+
+  // We will potentially reorder the property assignments, so they must be
+  // simple enough that the ordering does not matter.
   void AssignmentFromParameter(Handle<String> name, int index) {
-    EnsureAllocation();
-    names_->Add(name);
-    assigned_arguments_->Add(index);
-    assigned_constants_->Add(isolate_->factory()->undefined_value());
+    EnsureInitialized();
+    for (int i = 0; i < names_.length(); ++i) {
+      if (name->Equals(*names_[i])) {
+        assigned_arguments_[i] = index;
+        assigned_constants_[i] = isolate_->factory()->undefined_value();
+        return;
+      }
+    }
+    names_.Add(name);
+    assigned_arguments_.Add(index);
+    assigned_constants_.Add(isolate_->factory()->undefined_value());
   }
 
   void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
-    EnsureAllocation();
-    names_->Add(name);
-    assigned_arguments_->Add(-1);
-    assigned_constants_->Add(value);
+    EnsureInitialized();
+    for (int i = 0; i < names_.length(); ++i) {
+      if (name->Equals(*names_[i])) {
+        assigned_arguments_[i] = -1;
+        assigned_constants_[i] = value;
+        return;
+      }
+    }
+    names_.Add(name);
+    assigned_arguments_.Add(-1);
+    assigned_constants_.Add(value);
   }
 
   void AssignmentFromSomethingElse() {
@@ -1082,41 +1116,42 @@
     only_simple_this_property_assignments_ = false;
   }
 
-  void EnsureAllocation() {
-    if (names_ == NULL) {
-      ASSERT(assigned_arguments_ == NULL);
-      ASSERT(assigned_constants_ == NULL);
-      Zone* zone = isolate_->zone();
-      names_ = new(zone) ZoneStringList(4);
-      assigned_arguments_ = new(zone) ZoneList<int>(4);
-      assigned_constants_ = new(zone) ZoneObjectList(4);
+  void EnsureInitialized() {
+    if (names_.capacity() == 0) {
+      ASSERT(assigned_arguments_.capacity() == 0);
+      ASSERT(assigned_constants_.capacity() == 0);
+      names_.Initialize(4);
+      assigned_arguments_.Initialize(4);
+      assigned_constants_.Initialize(4);
     }
   }
 
   Isolate* isolate_;
   bool only_simple_this_property_assignments_;
-  ZoneStringList* names_;
-  ZoneList<int>* assigned_arguments_;
-  ZoneObjectList* assigned_constants_;
+  ZoneStringList names_;
+  ZoneList<int> assigned_arguments_;
+  ZoneObjectList assigned_constants_;
 };
 
 
 Statement* Parser::ParseSourceElement(ZoneStringList* labels,
                                       bool* ok) {
+  // (Ecma 262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In harmony mode we additionally allow the following productions:
+  // SourceElement:
+  //    LetDeclaration
+  //    ConstDeclaration
+
   if (peek() == Token::FUNCTION) {
-    // FunctionDeclaration is only allowed in the context of SourceElements
-    // (Ecma 262 5th Edition, clause 14):
-    // SourceElement:
-    //    Statement
-    //    FunctionDeclaration
-    // Common language extension is to allow function declaration in place
-    // of any statement. This language extension is disabled in strict mode.
     return ParseFunctionDeclaration(ok);
-  } else if (peek() == Token::LET) {
+  } else if (peek() == Token::LET || peek() == Token::CONST) {
     return ParseVariableStatement(kSourceElement, ok);
-  } else {
-    return ParseStatement(labels, ok);
   }
+  return ParseStatement(labels, ok);
 }
 
 
@@ -1124,7 +1159,7 @@
                                   int end_token,
                                   bool* ok) {
   // SourceElements ::
-  //   (Statement)* <end_token>
+  //   (SourceElement)* <end_token>
 
   // Allocate a target stack to use for this set of source
   // elements. This way, all scripts and functions get their own
@@ -1134,7 +1169,7 @@
 
   ASSERT(processor != NULL);
   InitializationBlockFinder block_finder(top_scope_, target_stack_);
-  ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
+  ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
   bool directive_prologue = true;     // Parsing directive prologue.
 
   while (peek() != end_token) {
@@ -1160,11 +1195,13 @@
         Handle<String> directive = Handle<String>::cast(literal->handle());
 
         // Check "use strict" directive (ES5 14.1).
-        if (!top_scope_->is_strict_mode() &&
+        if (top_scope_->is_classic_mode() &&
             directive->Equals(isolate()->heap()->use_strict()) &&
             token_loc.end_pos - token_loc.beg_pos ==
               isolate()->heap()->use_strict()->length() + 2) {
-          top_scope_->EnableStrictMode();
+          // TODO(ES6): Fix entering extended mode, once it is specified.
+          top_scope_->SetLanguageMode(FLAG_harmony_scoping
+                                      ? EXTENDED_MODE : STRICT_MODE);
           // "use strict" is the only directive for now.
           directive_prologue = false;
         }
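
The length comparison rejects any directive whose source span differs from the bare literal plus its two quote characters, so an escaped spelling with the same string value does not enable strict mode. A standalone sketch of the check:

    // Standalone sketch of the directive check: the token span must equal
    // strlen("use strict") plus the two quote characters.
    #include <cstdio>
    #include <cstring>
    #include <string>

    bool IsUseStrictDirective(const std::string& literal_value,
                              int token_beg_pos, int token_end_pos) {
      return literal_value == "use strict" &&
             token_end_pos - token_beg_pos ==
                 static_cast<int>(std::strlen("use strict")) + 2;
    }

    int main() {
      // "use strict" occupies 12 source characters: accepted.
      std::printf("%d\n", IsUseStrictDirective("use strict", 0, 12));
      // An escaped spelling has the same value but a longer span: rejected.
      std::printf("%d\n", IsUseStrictDirective("use strict", 0, 18));
      return 0;
    }
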
@@ -1188,7 +1225,7 @@
         this_property_assignment_finder.only_simple_this_property_assignments()
         && top_scope_->declarations()->length() == 0;
     if (only_simple_this_property_assignments) {
-      lexical_scope_->SetThisPropertyAssignmentInfo(
+      current_function_state_->SetThisPropertyAssignmentInfo(
           only_simple_this_property_assignments,
           this_property_assignment_finder.GetThisPropertyAssignments());
     }
@@ -1230,6 +1267,7 @@
       return ParseBlock(labels, ok);
 
     case Token::CONST:  // fall through
+    case Token::LET:
     case Token::VAR:
       stmt = ParseVariableStatement(kStatement, ok);
       break;
@@ -1295,9 +1333,14 @@
     }
 
     case Token::FUNCTION: {
-      // In strict mode, FunctionDeclaration is only allowed in the context
-      // of SourceElements.
-      if (top_scope_->is_strict_mode()) {
+      // FunctionDeclaration is only allowed in the context of SourceElements
+      // (Ecma 262 5th Edition, clause 14):
+      // SourceElement:
+      //    Statement
+      //    FunctionDeclaration
+      // A common language extension is to allow function declarations in
+      // place of any statement. This extension is disabled in strict mode.
+      if (!top_scope_->is_classic_mode()) {
         ReportMessageAt(scanner().peek_location(), "strict_function",
                         Vector<const char*>::empty());
         *ok = false;
@@ -1321,7 +1364,7 @@
 
 
 VariableProxy* Parser::Declare(Handle<String> name,
-                               Variable::Mode mode,
+                               VariableMode mode,
                                FunctionLiteral* fun,
                                bool resolve,
                                bool* ok) {
@@ -1329,6 +1372,12 @@
   // If we are inside a function, a declaration of a var/const variable is a
   // truly local variable, and the scope of the variable is always the function
   // scope.
+  // Let/const variables in harmony mode are always added to the immediately
+  // enclosing scope.
+  Scope* declaration_scope = (mode == LET || mode == CONST_HARMONY)
+      ? top_scope_ : top_scope_->DeclarationScope();
+  InitializationFlag init_flag = (fun != NULL || mode == VAR)
+      ? kCreatedInitialized : kNeedsInitialization;
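
A minimal sketch of the two decisions made here, under the stated rules: let and harmony const bindings go to the immediately enclosing scope, and only function declarations and var bindings start out initialized (the enums below mirror the names used in the diff):

    // Minimal sketch of the declaration rules above.
    #include <cstdio>

    enum VariableMode { VAR, CONST, CONST_HARMONY, LET };
    enum InitializationFlag { kNeedsInitialization, kCreatedInitialized };

    // let and harmony const bind in the immediately enclosing scope; var and
    // classic const hoist to the nearest declaration (function/global) scope.
    bool BindsInCurrentScope(VariableMode mode) {
      return mode == LET || mode == CONST_HARMONY;
    }

    InitializationFlag InitFlagFor(VariableMode mode, bool is_function_decl) {
      return (is_function_decl || mode == VAR) ? kCreatedInitialized
                                               : kNeedsInitialization;
    }

    int main() {
      std::printf("let binds locally:  %d\n", BindsInCurrentScope(LET));
      std::printf("var is initialized: %d\n",
                  InitFlagFor(VAR, false) == kCreatedInitialized);
      std::printf("const needs init:   %d\n",
                  InitFlagFor(CONST, false) == kNeedsInitialization);
      return 0;
    }
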
 
   // If a function scope exists, then we can statically declare this
   // variable and also set its mode. In any case, a Declaration node
@@ -1338,17 +1387,16 @@
   // to the calling function context.
   // Similarly, strict mode eval scope does not leak variable declarations to
   // the caller's scope so we declare all locals, too.
-
-  Scope* declaration_scope = mode == Variable::LET ? top_scope_
-      : top_scope_->DeclarationScope();
+  // Also for block scoped let/const bindings the variable can be
+  // statically declared.
   if (declaration_scope->is_function_scope() ||
-      declaration_scope->is_strict_mode_eval_scope() ||
+      declaration_scope->is_strict_or_extended_eval_scope() ||
       declaration_scope->is_block_scope()) {
     // Declare the variable in the function scope.
     var = declaration_scope->LocalLookup(name);
     if (var == NULL) {
       // Declare the name.
-      var = declaration_scope->DeclareLocal(name, mode);
+      var = declaration_scope->DeclareLocal(name, mode, init_flag);
     } else {
       // The name was declared in this scope before; check for conflicting
       // re-declarations. We have a conflict if either of the declarations is
@@ -1361,12 +1409,13 @@
       //
       // because the var declaration is hoisted to the function scope where 'x'
       // is already bound.
-      if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) {
+      if ((mode != VAR) || (var->mode() != VAR)) {
         // We only have vars, consts and lets in declarations.
-        ASSERT(var->mode() == Variable::VAR ||
-               var->mode() == Variable::CONST ||
-               var->mode() == Variable::LET);
-        if (harmony_block_scoping_) {
+        ASSERT(var->mode() == VAR ||
+               var->mode() == CONST ||
+               var->mode() == CONST_HARMONY ||
+               var->mode() == LET);
+        if (is_extended_mode()) {
           // In harmony mode we treat re-declarations as early errors. See
           // ES5 16 for a definition of early errors.
           SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
@@ -1376,8 +1425,8 @@
           *ok = false;
           return NULL;
         }
-        const char* type = (var->mode() == Variable::VAR) ? "var" :
-                           (var->mode() == Variable::CONST) ? "const" : "let";
+        const char* type = (var->mode() == VAR)
+            ? "var" : var->is_const_mode() ? "const" : "let";
         Handle<String> type_string =
             isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
         Expression* expression =
@@ -1405,19 +1454,35 @@
   // a performance issue since it may lead to repeated
   // Runtime::DeclareContextSlot() calls.
   VariableProxy* proxy = declaration_scope->NewUnresolved(
-      name, false, scanner().location().beg_pos);
+      name, scanner().location().beg_pos);
   declaration_scope->AddDeclaration(
       new(zone()) Declaration(proxy, mode, fun, top_scope_));
 
-  // For global const variables we bind the proxy to a variable.
-  if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
+  if ((mode == CONST || mode == CONST_HARMONY) &&
+      declaration_scope->is_global_scope()) {
+    // For global const variables we bind the proxy to a variable.
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
     var = new(zone()) Variable(declaration_scope,
                                name,
-                               Variable::CONST,
+                               mode,
                                true,
-                               kind);
+                               kind,
+                               kNeedsInitialization);
+  } else if (declaration_scope->is_eval_scope() &&
+             declaration_scope->is_classic_mode()) {
+    // For variable declarations in a non-strict eval scope the proxy is bound
+    // to a lookup variable to force a dynamic declaration using the
+    // DeclareContextSlot runtime function.
+    Variable::Kind kind = Variable::NORMAL;
+    var = new(zone()) Variable(declaration_scope,
+                               name,
+                               mode,
+                               true,
+                               kind,
+                               init_flag);
+    var->AllocateTo(Variable::LOOKUP, -1);
+    resolve = true;
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1487,7 +1552,7 @@
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared =
       isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
-          Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
+          Handle<ScopeInfo>(fun->shared()->scope_info()));
   shared->set_construct_stub(*construct_stub);
 
   // Copy the function data to the shared function info.
@@ -1500,7 +1565,7 @@
   // other functions are setup when entering the surrounding scope.
   SharedFunctionInfoLiteral* lit =
       new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
-  VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
+  VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK);
   return new(zone()) ExpressionStatement(new(zone()) Assignment(
       isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
 }
@@ -1522,14 +1587,14 @@
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
   // initial value upon entering the corresponding scope.
-  Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR;
+  VariableMode mode = is_extended_mode() ? LET : VAR;
   Declare(name, mode, fun, true, CHECK_OK);
   return EmptyStatement();
 }
 
 
 Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
-  if (harmony_block_scoping_) return ParseScopedBlock(labels, ok);
+  if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
 
   // Block ::
   //   '{' Statement* '}'
@@ -1555,22 +1620,21 @@
 
 
 Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
+  // In harmony mode, blocks contain source elements instead of statements.
+  //
+  // Block ::
+  //   '{' SourceElement* '}'
+
   // Construct block expecting 16 statements.
   Block* body = new(zone()) Block(isolate(), labels, 16, false);
-  Scope* saved_scope = top_scope_;
-  Scope* block_scope = NewScope(top_scope_,
-                                Scope::BLOCK_SCOPE,
-                                inside_with());
-  if (top_scope_->is_strict_mode()) {
-    block_scope->EnableStrictMode();
-  }
-  top_scope_ = block_scope;
+  Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
 
   // Parse the statements and collect escaping labels.
-  TargetCollector collector;
-  Target target(&this->target_stack_, &collector);
   Expect(Token::LBRACE, CHECK_OK);
-  {
+  block_scope->set_start_position(scanner().location().beg_pos);
+  { BlockState block_state(this, block_scope);
+    TargetCollector collector;
+    Target target(&this->target_stack_, &collector);
     Target target_body(&this->target_stack_, body);
     InitializationBlockFinder block_finder(top_scope_, target_stack_);
 
@@ -1583,8 +1647,7 @@
     }
   }
   Expect(Token::RBRACE, CHECK_OK);
-  top_scope_ = saved_scope;
-
+  block_scope->set_end_position(scanner().location().end_pos);
   block_scope = block_scope->FinalizeBlockScope();
   body->set_block_scope(block_scope);
   return body;
@@ -1598,6 +1661,7 @@
 
   Handle<String> ignore;
   Block* result = ParseVariableDeclarations(var_context,
+                                            NULL,
                                             &ignore,
                                             CHECK_OK);
   ExpectSemicolon(CHECK_OK);
@@ -1616,13 +1680,25 @@
 // *var is untouched; in particular, it is the caller's responsibility
 // to initialize it properly. This mechanism is used for the parsing
 // of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                         Handle<String>* out,
-                                         bool* ok) {
+Block* Parser::ParseVariableDeclarations(
+    VariableDeclarationContext var_context,
+    VariableDeclarationProperties* decl_props,
+    Handle<String>* out,
+    bool* ok) {
   // VariableDeclarations ::
-  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
-  Variable::Mode mode = Variable::VAR;
+  //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
+  //
+  // The ES6 Draft Rev3 specifies the following grammar for const declarations
+  //
+  // ConstDeclaration ::
+  //   const ConstBinding (',' ConstBinding)* ';'
+  // ConstBinding ::
+  //   Identifier '=' AssignmentExpression
+  //
+  // TODO(ES6):
+  // ConstBinding ::
+  //   BindingPattern '=' AssignmentExpression
+  VariableMode mode = VAR;
   // True if the binding needs initialization. 'let' and 'const' declared
   // bindings are created uninitialized by their declaration nodes and
   // need initialization. 'var' declared bindings are always initialized
@@ -1633,33 +1709,69 @@
   if (peek() == Token::VAR) {
     Consume(Token::VAR);
   } else if (peek() == Token::CONST) {
+    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
+    //
+    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    //
+    // However, disallowing const in classic mode will break compatibility with
+    // existing pages. Therefore we keep allowing const with the old
+    // non-harmony semantics in classic mode.
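+    // Illustrative examples (not from the draft text): "const x = 1" keeps
+    // the old non-harmony semantics in classic mode, is rejected with
+    // "strict_const" in strict mode, and becomes a harmony const in extended
+    // mode (when in a source element position).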
     Consume(Token::CONST);
-    if (top_scope_->is_strict_mode()) {
-      ReportMessage("strict_const", Vector<const char*>::empty());
+    switch (top_scope_->language_mode()) {
+      case CLASSIC_MODE:
+        mode = CONST;
+        init_op = Token::INIT_CONST;
+        break;
+      case STRICT_MODE:
+        ReportMessage("strict_const", Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      case EXTENDED_MODE:
+        if (var_context != kSourceElement &&
+            var_context != kForStatement) {
+          // In extended mode 'const' declarations are only allowed in source
+          // element positions.
+          ReportMessage("unprotected_const", Vector<const char*>::empty());
+          *ok = false;
+          return NULL;
+        }
+        mode = CONST_HARMONY;
+        init_op = Token::INIT_CONST_HARMONY;
+    }
+    is_const = true;
+    needs_init = true;
+  } else if (peek() == Token::LET) {
+    // ES6 Draft Rev4 section 12.2.1:
+    //
+    // LetDeclaration : let LetBindingList ;
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    if (!is_extended_mode()) {
+      ReportMessage("illegal_let", Vector<const char*>::empty());
       *ok = false;
       return NULL;
     }
-    mode = Variable::CONST;
-    is_const = true;
-    needs_init = true;
-    init_op = Token::INIT_CONST;
-  } else if (peek() == Token::LET) {
     Consume(Token::LET);
     if (var_context != kSourceElement &&
         var_context != kForStatement) {
+      // Let declarations are only allowed in source element positions.
       ASSERT(var_context == kStatement);
       ReportMessage("unprotected_let", Vector<const char*>::empty());
       *ok = false;
       return NULL;
     }
-    mode = Variable::LET;
+    mode = LET;
     needs_init = true;
     init_op = Token::INIT_LET;
   } else {
     UNREACHABLE();  // by current callers
   }
 
-  Scope* declaration_scope = mode == Variable::LET
+  Scope* declaration_scope = (mode == LET || mode == CONST_HARMONY)
       ? top_scope_ : top_scope_->DeclarationScope();
   // The scope of a var/const declared variable anywhere inside a function
   // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
@@ -1686,7 +1798,7 @@
     if (fni_ != NULL) fni_->PushVariableName(name);
 
     // Strict mode variables may not be named eval or arguments
-    if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
+    if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_var_name", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -1704,8 +1816,10 @@
     // If we have a const declaration, in an inner scope, the proxy is always
     // bound to the declared variable (independent of possibly surrounding with
     // statements).
-    Declare(name, mode, NULL, is_const /* always bound for CONST! */,
-            CHECK_OK);
+    // For let/const declarations in harmony mode, we can also immediately
+    // pre-resolve the proxy because it resides in the same scope as the
+    // declaration.
+    VariableProxy* proxy = Declare(name, mode, NULL, mode != VAR, CHECK_OK);
     nvars++;
     if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
       ReportMessageAt(scanner().location(), "too_many_variables",
@@ -1744,7 +1858,8 @@
     Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
     Expression* value = NULL;
     int position = -1;
-    if (peek() == Token::ASSIGN) {
+    // Harmony consts have non-optional initializers.
+    if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
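+      // A CONST_HARMONY binding without an initializer reaches this Expect()
+      // on a non-'=' token, so the missing initializer is reported as a
+      // syntax error.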
       Expect(Token::ASSIGN, CHECK_OK);
       position = scanner().location().beg_pos;
       value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -1756,6 +1871,12 @@
       } else {
         fni_->RemoveLastFunction();
       }
+      if (decl_props != NULL) *decl_props = kHasInitializers;
+    }
+
+    // Record the end position of the initializer.
+    if (proxy->var() != NULL) {
+      proxy->var()->set_initializer_position(scanner().location().end_pos);
     }
 
     // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
@@ -1782,7 +1903,6 @@
     // declaration statement has been executed. This is important in
     // browsers where the global object (window) has lots of
     // properties defined in prototype objects.
-
     if (initialization_scope->is_global_scope()) {
       // Compute the arguments for the runtime call.
       ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
@@ -1807,10 +1927,8 @@
       } else {
         // Add strict mode.
         // We may want to pass singleton to avoid Literal allocations.
-        StrictModeFlag flag = initialization_scope->is_strict_mode()
-            ? kStrictMode
-            : kNonStrictMode;
-        arguments->Add(NewNumberLiteral(flag));
+        LanguageMode language_mode = initialization_scope->language_mode();
+        arguments->Add(NewNumberLiteral(language_mode));
 
         // Be careful not to assign a value to the global variable if
         // we're in a with. The initialization value should not
@@ -1834,30 +1952,34 @@
       }
 
       block->AddStatement(new(zone()) ExpressionStatement(initialize));
+    } else if (needs_init) {
+      // Constant initializations always assign to the declared constant which
+      // is always at the function scope level. This is only relevant for
+      // dynamically looked-up variables and constants (the start context for
+      // constant lookups is always the function context, while it is the top
+      // context for var declared variables). Sigh...
+      // For 'let' and 'const' declared variables in harmony mode the
+      // initialization also always assigns to the declared variable.
+      ASSERT(proxy != NULL);
+      ASSERT(proxy->var() != NULL);
+      ASSERT(value != NULL);
+      Assignment* assignment =
+          new(zone()) Assignment(isolate(), init_op, proxy, value, position);
+      block->AddStatement(new(zone()) ExpressionStatement(assignment));
+      value = NULL;
     }
 
     // Add an assignment node to the initialization statement block if we still
-    // have a pending initialization value. We must distinguish between
-    // different kinds of declarations: 'var' initializations are simply
-    // assignments (with all the consequences if they are inside a 'with'
-    // statement - they may change a 'with' object property). Constant
-    // initializations always assign to the declared constant which is
-    // always at the function scope level. This is only relevant for
-    // dynamically looked-up variables and constants (the start context
-    // for constant lookups is always the function context, while it is
-    // the top context for var declared variables). Sigh...
-    // For 'let' declared variables the initialization is in the same scope
-    // as the declaration. Thus dynamic lookups are unnecessary even if the
-    // block scope is inside a with.
+    // have a pending initialization value.
     if (value != NULL) {
-      bool in_with = mode == Variable::VAR ? inside_with() : false;
-      VariableProxy* proxy =
-          initialization_scope->NewUnresolved(name, in_with);
+      ASSERT(mode == VAR);
+      // 'var' initializations are simply assignments (with all the consequences
+      // if they are inside a 'with' statement - they may change a 'with' object
+      // property).
+      VariableProxy* proxy = initialization_scope->NewUnresolved(name);
       Assignment* assignment =
           new(zone()) Assignment(isolate(), init_op, proxy, value, position);
-      if (block) {
-        block->AddStatement(new(zone()) ExpressionStatement(assignment));
-      }
+      block->AddStatement(new(zone()) ExpressionStatement(assignment));
     }
 
     if (fni_ != NULL) fni_->Leave();
@@ -2072,7 +2194,7 @@
 
   Expect(Token::WITH, CHECK_OK);
 
-  if (top_scope_->is_strict_mode()) {
+  if (!top_scope_->is_classic_mode()) {
     ReportMessage("strict_mode_with", Vector<const char*>::empty());
     *ok = false;
     return NULL;
@@ -2082,10 +2204,14 @@
   Expression* expr = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
 
-  ++with_nesting_level_;
   top_scope_->DeclarationScope()->RecordWithStatement();
-  Statement* stmt = ParseStatement(labels, CHECK_OK);
-  --with_nesting_level_;
+  Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+  Statement* stmt;
+  { BlockState block_state(this, with_scope);
+    with_scope->set_start_position(scanner().peek_location().beg_pos);
+    stmt = ParseStatement(labels, CHECK_OK);
+    with_scope->set_end_position(scanner().location().end_pos);
+  }
   return new(zone()) WithStatement(expr, stmt);
 }
 
@@ -2210,9 +2336,11 @@
     Consume(Token::CATCH);
 
     Expect(Token::LPAREN, CHECK_OK);
+    catch_scope = NewScope(top_scope_, CATCH_SCOPE);
+    catch_scope->set_start_position(scanner().location().beg_pos);
     name = ParseIdentifier(CHECK_OK);
 
-    if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
+    if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_catch_variable", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -2222,22 +2350,16 @@
 
     if (peek() == Token::LBRACE) {
       Target target(&this->target_stack_, &catch_collector);
-      catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
-      if (top_scope_->is_strict_mode()) {
-        catch_scope->EnableStrictMode();
-      }
-      Variable::Mode mode = harmony_block_scoping_
-          ? Variable::LET : Variable::VAR;
-      catch_variable = catch_scope->DeclareLocal(name, mode);
+      VariableMode mode = is_extended_mode() ? LET : VAR;
+      catch_variable =
+          catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
 
-      Scope* saved_scope = top_scope_;
-      top_scope_ = catch_scope;
+      BlockState block_state(this, catch_scope);
       catch_block = ParseBlock(NULL, CHECK_OK);
-      top_scope_ = saved_scope;
     } else {
       Expect(Token::LBRACE, CHECK_OK);
     }
-
+    catch_scope->set_end_position(scanner().location().end_pos);
     tok = peek();
   }
 
@@ -2255,11 +2377,12 @@
   if (catch_block != NULL && finally_block != NULL) {
     // If we have both, create an inner try/catch.
     ASSERT(catch_scope != NULL && catch_variable != NULL);
-    TryCatchStatement* statement =
-        new(zone()) TryCatchStatement(try_block,
-                                      catch_scope,
-                                      catch_variable,
-                                      catch_block);
+    int index = current_function_state_->NextHandlerIndex();
+    TryCatchStatement* statement = new(zone()) TryCatchStatement(index,
+                                                                 try_block,
+                                                                 catch_scope,
+                                                                 catch_variable,
+                                                                 catch_block);
     statement->set_escaping_targets(try_collector.targets());
     try_block = new(zone()) Block(isolate(), NULL, 1, false);
     try_block->AddStatement(statement);
@@ -2270,14 +2393,18 @@
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
     ASSERT(catch_scope != NULL && catch_variable != NULL);
-    result =
-        new(zone()) TryCatchStatement(try_block,
-                                      catch_scope,
-                                      catch_variable,
-                                      catch_block);
+    int index = current_function_state_->NextHandlerIndex();
+    result = new(zone()) TryCatchStatement(index,
+                                           try_block,
+                                           catch_scope,
+                                           catch_variable,
+                                           catch_block);
   } else {
     ASSERT(finally_block != NULL);
-    result = new(zone()) TryFinallyStatement(try_block, finally_block);
+    int index = current_function_state_->NextHandlerIndex();
+    result = new(zone()) TryFinallyStatement(index,
+                                             try_block,
+                                             finally_block);
     // Combine the jump targets of the try block and the possible catch block.
     try_collector.targets()->AddAll(*catch_collector.targets());
   }
@@ -2343,16 +2470,22 @@
 
   Statement* init = NULL;
 
+  // Create an in-between scope for let-bound iteration variables.
+  Scope* saved_scope = top_scope_;
+  Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
+  top_scope_ = for_scope;
+
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
+  for_scope->set_start_position(scanner().location().beg_pos);
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST) {
       Handle<String> name;
       Block* variable_statement =
-          ParseVariableDeclarations(kForStatement, &name, CHECK_OK);
+          ParseVariableDeclarations(kForStatement, NULL, &name, CHECK_OK);
 
       if (peek() == Token::IN && !name.is_null()) {
-        VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+        VariableProxy* each = top_scope_->NewUnresolved(name);
         ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
         Target target(&this->target_stack_, loop);
 
@@ -2365,12 +2498,73 @@
         Block* result = new(zone()) Block(isolate(), NULL, 2, false);
         result->AddStatement(variable_statement);
         result->AddStatement(loop);
+        top_scope_ = saved_scope;
+        for_scope->set_end_position(scanner().location().end_pos);
+        for_scope = for_scope->FinalizeBlockScope();
+        ASSERT(for_scope == NULL);
         // Parsed for-in loop w/ variable/const declaration.
         return result;
       } else {
         init = variable_statement;
       }
+    } else if (peek() == Token::LET) {
+      Handle<String> name;
+      VariableDeclarationProperties decl_props = kHasNoInitializers;
+      Block* variable_statement =
+          ParseVariableDeclarations(kForStatement,
+                                    &decl_props,
+                                    &name,
+                                    CHECK_OK);
+      bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
+      if (peek() == Token::IN && accept_IN) {
+        // Rewrite a for-in statement of the form
+        //
+        //   for (let x in e) b
+        //
+        // into
+        //
+        //   <let x' be a temporary variable>
+        //   for (x' in e) {
+        //     let x;
+        //     x = x';
+        //     b;
+        //   }
 
+        // TODO(keuchel): Move the temporary variable to the block scope, after
+        // implementing stack allocated block scoped variables.
+        Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
+        VariableProxy* temp_proxy = new(zone()) VariableProxy(isolate(), temp);
+        VariableProxy* each = top_scope_->NewUnresolved(name);
+        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
+        Target target(&this->target_stack_, loop);
+
+        Expect(Token::IN, CHECK_OK);
+        Expression* enumerable = ParseExpression(true, CHECK_OK);
+        Expect(Token::RPAREN, CHECK_OK);
+
+        Statement* body = ParseStatement(NULL, CHECK_OK);
+        Block* body_block = new(zone()) Block(isolate(), NULL, 3, false);
+        Assignment* assignment = new(zone()) Assignment(isolate(),
+                                                        Token::ASSIGN,
+                                                        each,
+                                                        temp_proxy,
+                                                        RelocInfo::kNoPosition);
+        Statement* assignment_statement =
+            new(zone()) ExpressionStatement(assignment);
+        body_block->AddStatement(variable_statement);
+        body_block->AddStatement(assignment_statement);
+        body_block->AddStatement(body);
+        loop->Initialize(temp_proxy, enumerable, body_block);
+        top_scope_ = saved_scope;
+        for_scope->set_end_position(scanner().location().end_pos);
+        for_scope = for_scope->FinalizeBlockScope();
+        body_block->set_block_scope(for_scope);
+        // Parsed for-in loop w/ let declaration.
+        return loop;
+
+      } else {
+        init = variable_statement;
+      }
     } else {
       Expression* expression = ParseExpression(false, CHECK_OK);
       if (peek() == Token::IN) {
@@ -2392,6 +2586,10 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         if (loop) loop->Initialize(expression, enumerable, body);
+        top_scope_ = saved_scope;
+        for_scope->set_end_position(scanner().location().end_pos);
+        for_scope = for_scope->FinalizeBlockScope();
+        ASSERT(for_scope == NULL);
         // Parsed for-in loop.
         return loop;
 
@@ -2422,8 +2620,31 @@
   Expect(Token::RPAREN, CHECK_OK);
 
   Statement* body = ParseStatement(NULL, CHECK_OK);
-  if (loop) loop->Initialize(init, cond, next, body);
-  return loop;
+  top_scope_ = saved_scope;
+  for_scope->set_end_position(scanner().location().end_pos);
+  for_scope = for_scope->FinalizeBlockScope();
+  if (for_scope != NULL) {
+    // Rewrite a for statement of the form
+    //
+    //   for (let x = i; c; n) b
+    //
+    // into
+    //
+    //   {
+    //     let x = i;
+    //     for (; c; n) b
+    //   }
+    ASSERT(init != NULL);
+    Block* result = new(zone()) Block(isolate(), NULL, 2, false);
+    result->AddStatement(init);
+    result->AddStatement(loop);
+    result->set_block_scope(for_scope);
+    if (loop) loop->Initialize(NULL, cond, next, body);
+    return result;
+  } else {
+    if (loop) loop->Initialize(init, cond, next, body);
+    return loop;
+  }
 }
 
 
@@ -2470,7 +2691,7 @@
     expression = NewThrowReferenceError(type);
   }
 
-  if (top_scope_->is_strict_mode()) {
+  if (!top_scope_->is_classic_mode()) {
     // Assignment to eval or arguments is disallowed in strict mode.
     CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
   }
@@ -2489,13 +2710,13 @@
       property != NULL &&
       property->obj()->AsVariableProxy() != NULL &&
       property->obj()->AsVariableProxy()->is_this()) {
-    lexical_scope_->AddProperty();
+    current_function_state_->AddProperty();
   }
 
   // If we assign a function literal to a property we pretenure the
   // literal so it can be added as a constant function property.
   if (property != NULL && right->AsFunctionLiteral() != NULL) {
-    right->AsFunctionLiteral()->set_pretenure(true);
+    right->AsFunctionLiteral()->set_pretenure();
   }
 
   if (fni_ != NULL) {
@@ -2620,7 +2841,7 @@
           case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
           default: break;
         }
-        x = NewCompareNode(cmp, x, y, position);
+        x = new(zone()) CompareOperation(isolate(), cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
           x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
@@ -2636,27 +2857,6 @@
 }
 
 
-Expression* Parser::NewCompareNode(Token::Value op,
-                                   Expression* x,
-                                   Expression* y,
-                                   int position) {
-  ASSERT(op != Token::NE && op != Token::NE_STRICT);
-  if (op == Token::EQ || op == Token::EQ_STRICT) {
-    bool is_strict = (op == Token::EQ_STRICT);
-    Literal* x_literal = x->AsLiteral();
-    if (x_literal != NULL && x_literal->IsNull()) {
-      return new(zone()) CompareToNull(isolate(), is_strict, y);
-    }
-
-    Literal* y_literal = y->AsLiteral();
-    if (y_literal != NULL && y_literal->IsNull()) {
-      return new(zone()) CompareToNull(isolate(), is_strict, x);
-    }
-  }
-  return new(zone()) CompareOperation(isolate(), op, x, y, position);
-}
-
-
 Expression* Parser::ParseUnaryExpression(bool* ok) {
   // UnaryExpression ::
   //   PostfixExpression
@@ -2700,7 +2900,7 @@
     }
 
     // "delete identifier" is a syntax error in strict mode.
-    if (op == Token::DELETE && top_scope_->is_strict_mode()) {
+    if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
       VariableProxy* operand = expression->AsVariableProxy();
       if (operand != NULL && !operand->is_this()) {
         ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -2724,7 +2924,7 @@
       expression = NewThrowReferenceError(type);
     }
 
-    if (top_scope_->is_strict_mode()) {
+    if (!top_scope_->is_classic_mode()) {
       // Prefix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
@@ -2759,7 +2959,7 @@
       expression = NewThrowReferenceError(type);
     }
 
-    if (top_scope_->is_strict_mode()) {
+    if (!top_scope_->is_classic_mode()) {
       // Postfix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
@@ -2800,29 +3000,32 @@
       }
 
       case Token::LPAREN: {
-        int pos = scanner().location().beg_pos;
+        int pos;
+        if (scanner().current_token() == Token::IDENTIFIER) {
+          // For a call of an identifier we want to report the position of
+          // the identifier as the position of the call in the stack trace.
+          pos = scanner().location().beg_pos;
+        } else {
+          // For other kinds of calls we record the position of the
+          // parenthesis as the position of the call.  Note that this is
+          // extremely important for expressions of the form
+          // function(){...}(), where the call position must not point to the
+          // closing brace; otherwise it would intersect with the positions
+          // recorded for the function literal and confuse the debugger.
+          pos = scanner().peek_location().beg_pos;
+        }
         ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
 
         // Keep track of eval() calls since they disable all local variable
         // optimizations.
         // The calls that need special treatment are the
-        // direct (i.e. not aliased) eval calls. These calls are all of the
-        // form eval(...) with no explicit receiver object where eval is not
-        // declared in the current scope chain.
+        // direct eval calls. These calls are all of the form eval(...), with
+        // no explicit receiver.
         // These calls are marked as potentially direct eval calls. Whether
         // they are actually direct calls to eval is determined at run time.
-        // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
-        // in the local scope chain. It only matters that it's called "eval",
-        // is called without a receiver and it refers to the original eval
-        // function.
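+        // Sketch of the distinction: "eval(s)" has a VariableProxy callee
+        // named "eval" and is marked here, whereas "obj.eval(s)" or
+        // "(0, eval)(s)" have a different callee shape and always take the
+        // indirect path at run time.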
         VariableProxy* callee = result->AsVariableProxy();
         if (callee != NULL &&
             callee->IsVariable(isolate()->factory()->eval_symbol())) {
-          Handle<String> name = callee->name();
-          Variable* var = top_scope_->Lookup(name);
-          if (var == NULL) {
-            top_scope_->DeclarationScope()->RecordEvalCall();
-          }
+          top_scope_->DeclarationScope()->RecordEvalCall();
         }
         result = NewCall(result, args, pos);
         break;
@@ -2999,9 +3202,9 @@
       return ReportMessage("unexpected_reserved",
                            Vector<const char*>::empty());
     case Token::FUTURE_STRICT_RESERVED_WORD:
-      return ReportMessage(top_scope_->is_strict_mode() ?
-                               "unexpected_strict_reserved" :
-                               "unexpected_token_identifier",
+      return ReportMessage(top_scope_->is_classic_mode() ?
+                               "unexpected_token_identifier" :
+                               "unexpected_strict_reserved",
                            Vector<const char*>::empty());
     default:
       const char* name = Token::String(token);
@@ -3064,9 +3267,7 @@
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
-      result = top_scope_->NewUnresolved(name,
-                                         inside_with(),
-                                         scanner().location().beg_pos);
+      result = top_scope_->NewUnresolved(name, scanner().location().beg_pos);
       break;
     }
 
@@ -3181,11 +3382,13 @@
   Expect(Token::RBRACK, CHECK_OK);
 
   // Update the scope information before the pre-parsing bailout.
-  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
 
-  // Allocate a fixed array with all the literals.
-  Handle<FixedArray> literals =
+  // Allocate a fixed array to hold all the object literals.
+  Handle<FixedArray> object_literals =
       isolate()->factory()->NewFixedArray(values->length(), TENURED);
+  Handle<FixedDoubleArray> double_literals;
+  ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
 
   // Fill in the literals.
   bool is_simple = true;
@@ -3197,19 +3400,75 @@
     }
     Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
     if (boilerplate_value->IsUndefined()) {
-      literals->set_the_hole(i);
+      object_literals->set_the_hole(i);
+      if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+        double_literals->set_the_hole(i);
+      }
       is_simple = false;
     } else {
-      literals->set(i, *boilerplate_value);
+      // Examine each literal element, and adjust the ElementsKind if the
+      // literal element is not of a type that can be stored in the current
+      // ElementsKind.  Start with FAST_SMI_ONLY_ELEMENTS, and transition to
+      // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary.  Always remember
+      // the tagged value, no matter what the ElementsKind is in case we
+      // ultimately end up in FAST_ELEMENTS.
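+      // For example: [1, 2, 3] stays FAST_SMI_ONLY_ELEMENTS; [1, 2.5] moves
+      // to FAST_DOUBLE_ELEMENTS (when FLAG_smi_only_arrays is set); and
+      // [1, "a"] ends up in FAST_ELEMENTS.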
+      object_literals->set(i, *boilerplate_value);
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+        // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
+        // FAST_ELEMENTS is required.
+        if (!boilerplate_value->IsSmi()) {
+          if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
+            // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
+            // avoid over-allocating in TENURED space.
+            double_literals = isolate()->factory()->NewFixedDoubleArray(
+                values->length(), TENURED);
+            // Copy the contents of the FAST_SMI_ONLY_ELEMENT array to the
+            // FAST_DOUBLE_ELEMENTS array so that they are in sync.
+            for (int j = 0; j < i; ++j) {
+              Object* smi_value = object_literals->get(j);
+              if (smi_value->IsTheHole()) {
+                double_literals->set_the_hole(j);
+              } else {
+                double_literals->set(j, Smi::cast(smi_value)->value());
+              }
+            }
+            double_literals->set(i, boilerplate_value->Number());
+            elements_kind = FAST_DOUBLE_ELEMENTS;
+          } else {
+            elements_kind = FAST_ELEMENTS;
+          }
+        }
+      } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+        // Continue to store double values in to FAST_DOUBLE_ELEMENTS arrays
+        // until the first value is seen that can't be stored as a double.
+        if (boilerplate_value->IsNumber()) {
+          double_literals->set(i, boilerplate_value->Number());
+        } else {
+          elements_kind = FAST_ELEMENTS;
+        }
+      }
     }
   }
 
   // Simple and shallow arrays can be lazily copied, we transform the
   // elements array to a copy-on-write array.
-  if (is_simple && depth == 1 && values->length() > 0) {
-    literals->set_map(isolate()->heap()->fixed_cow_array_map());
+  if (is_simple && depth == 1 && values->length() > 0 &&
+      elements_kind != FAST_DOUBLE_ELEMENTS) {
+    object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
   }
 
+  Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
+      ? Handle<FixedArrayBase>(double_literals)
+      : Handle<FixedArrayBase>(object_literals);
+
+  // Remember both the literal's constant values as well as the ElementsKind
+  // in a 2-element FixedArray.
+  Handle<FixedArray> literals =
+      isolate()->factory()->NewFixedArray(2, TENURED);
+
+  literals->set(0, Smi::FromInt(elements_kind));
+  literals->set(1, *element_values);
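+  // Illustrative layout (with FLAG_smi_only_arrays): for [1.5, 2] this pair
+  // is { Smi(FAST_DOUBLE_ELEMENTS), FixedDoubleArray {1.5, 2.0} }.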
+
   return new(zone()) ArrayLiteral(
       isolate(), literals, values, literal_index, is_simple, depth);
 }
@@ -3291,11 +3550,11 @@
 // Validation per 11.1.5 Object Initialiser
 class ObjectLiteralPropertyChecker {
  public:
-  ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
+  ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
     props(&IsEqualString),
     elems(&IsEqualNumber),
     parser_(parser),
-    strict_(strict) {
+    language_mode_(language_mode) {
   }
 
   void CheckProperty(
@@ -3325,7 +3584,7 @@
   HashMap props;
   HashMap elems;
   Parser* parser_;
-  bool strict_;
+  LanguageMode language_mode_;
 };
 
 
@@ -3374,8 +3633,8 @@
   intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
   intptr_t curr = GetPropertyKind(property);
 
-  // Duplicate data properties are illegal in strict mode.
-  if (strict_ && (curr & prev & kData) != 0) {
+  // Duplicate data properties are illegal in strict or extended mode.
+  if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
     parser_->ReportMessageAt(loc, "strict_duplicate_property",
                              Vector<const char*>::empty());
     *ok = false;
@@ -3511,7 +3770,7 @@
   int number_of_boilerplate_properties = 0;
   bool has_function = false;
 
-  ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
+  ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
 
   Expect(Token::LBRACE, CHECK_OK);
 
@@ -3599,11 +3858,13 @@
     ObjectLiteral::Property* property =
         new(zone()) ObjectLiteral::Property(key, value);
 
-    // Mark object literals that contain function literals and pretenure the
-    // literal so it can be added as a constant function property.
-    if (value->AsFunctionLiteral() != NULL) {
+    // Mark top-level object literals that contain function literals and
+    // pretenure the literal so it can be added as a constant function
+    // property.
+    if (top_scope_->DeclarationScope()->is_global_scope() &&
+        value->AsFunctionLiteral() != NULL) {
       has_function = true;
-      value->AsFunctionLiteral()->set_pretenure(true);
+      value->AsFunctionLiteral()->set_pretenure();
     }
 
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -3623,7 +3884,7 @@
   Expect(Token::RBRACE, CHECK_OK);
 
   // Computation of literal_index must happen before pre parse bailout.
-  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
 
   Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
       number_of_boilerplate_properties * 2, TENURED);
@@ -3655,7 +3916,7 @@
     return NULL;
   }
 
-  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
 
   Handle<String> js_pattern = NextLiteralString(TENURED);
   scanner().ScanRegExpFlags();
@@ -3691,6 +3952,98 @@
 }
 
 
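+// A minimal ParserRecorder that stores the data for a single function (or
+// the first error seen). It is used below when a lazy function body is
+// parsed without preparse data; see Parser::LazyParseFunctionLiteral.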
+class SingletonLogger : public ParserRecorder {
+ public:
+  SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
+  ~SingletonLogger() { }
+
+  void Reset() { has_error_ = false; }
+
+  virtual void LogFunction(int start,
+                           int end,
+                           int literals,
+                           int properties,
+                           LanguageMode mode) {
+    ASSERT(!has_error_);
+    start_ = start;
+    end_ = end;
+    literals_ = literals;
+    properties_ = properties;
+    mode_ = mode;
+  }
+
+  // Logs a symbol creation of a literal or identifier.
+  virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
+  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
+
+  // Logs an error message and marks the log as containing an error.
+  // Further logging will be ignored, and ExtractData will return a vector
+  // representing the error only.
+  virtual void LogMessage(int start,
+                          int end,
+                          const char* message,
+                          const char* argument_opt) {
+    has_error_ = true;
+    start_ = start;
+    end_ = end;
+    message_ = message;
+    argument_opt_ = argument_opt;
+  }
+
+  virtual int function_position() { return 0; }
+
+  virtual int symbol_position() { return 0; }
+
+  virtual int symbol_ids() { return -1; }
+
+  virtual Vector<unsigned> ExtractData() {
+    UNREACHABLE();
+    return Vector<unsigned>();
+  }
+
+  virtual void PauseRecording() { }
+
+  virtual void ResumeRecording() { }
+
+  bool has_error() { return has_error_; }
+
+  int start() { return start_; }
+  int end() { return end_; }
+  int literals() {
+    ASSERT(!has_error_);
+    return literals_;
+  }
+  int properties() {
+    ASSERT(!has_error_);
+    return properties_;
+  }
+  LanguageMode language_mode() {
+    ASSERT(!has_error_);
+    return mode_;
+  }
+  const char* message() {
+    ASSERT(has_error_);
+    return message_;
+  }
+  const char* argument_opt() {
+    ASSERT(has_error_);
+    return argument_opt_;
+  }
+
+ private:
+  bool has_error_;
+  int start_;
+  int end_;
+  // For function entries.
+  int literals_;
+  int properties_;
+  LanguageMode mode_;
+  // For error messages.
+  const char* message_;
+  const char* argument_opt_;
+};
+
+
 FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
                                               bool name_is_strict_reserved,
                                               int function_token_position,
@@ -3713,26 +4066,24 @@
   // Function declarations are function scoped in normal mode, so they are
   // hoisted. In harmony block scoping mode they are block scoped, so they
   // are not hoisted.
-  Scope* scope = (type == FunctionLiteral::DECLARATION &&
-                  !harmony_block_scoping_)
-      ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
-      : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
-  ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
-  int materialized_literal_count;
-  int expected_property_count;
-  int start_pos;
-  int end_pos;
+  Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
+      ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
+      : NewScope(top_scope_, FUNCTION_SCOPE);
+  ZoneList<Statement*>* body = NULL;
+  int materialized_literal_count = -1;
+  int expected_property_count = -1;
+  int handler_count = 0;
   bool only_simple_this_property_assignments;
   Handle<FixedArray> this_property_assignments;
   bool has_duplicate_parameters = false;
   // Parse function body.
-  { LexicalScope lexical_scope(this, scope, isolate());
+  { FunctionState function_state(this, scope, isolate());
     top_scope_->SetScopeName(function_name);
 
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
-    start_pos = scanner().location().beg_pos;
+    scope->set_start_position(scanner().location().beg_pos);
     Scanner::Location name_loc = Scanner::Location::invalid();
     Scanner::Location dupe_loc = Scanner::Location::invalid();
     Scanner::Location reserved_loc = Scanner::Location::invalid();
@@ -3756,10 +4107,7 @@
         reserved_loc = scanner().location();
       }
 
-      top_scope_->DeclareParameter(param_name,
-                                   harmony_block_scoping_
-                                   ? Variable::LET
-                                   : Variable::VAR);
+      top_scope_->DeclareParameter(param_name, is_extended_mode() ? LET : VAR);
       num_parameters++;
       if (num_parameters > kMaxNumFunctionParameters) {
         ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3780,71 +4128,129 @@
     // NOTE: We create a proxy and resolve it here so that in the
     // future we can change the AST to only refer to VariableProxies
     // instead of Variables and Proxies as is the case now.
+    Variable* fvar = NULL;
+    Token::Value fvar_init_op = Token::INIT_CONST;
     if (type == FunctionLiteral::NAMED_EXPRESSION) {
-      Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
-      VariableProxy* fproxy =
-          top_scope_->NewUnresolved(function_name, inside_with());
-      fproxy->BindTo(fvar);
-      body->Add(new(zone()) ExpressionStatement(
-          new(zone()) Assignment(isolate(),
-                                 Token::INIT_CONST,
-                                 fproxy,
-                                 new(zone()) ThisFunction(isolate()),
-                                 RelocInfo::kNoPosition)));
+      VariableMode fvar_mode;
+      if (is_extended_mode()) {
+        fvar_mode = CONST_HARMONY;
+        fvar_init_op = Token::INIT_CONST_HARMONY;
+      } else {
+        fvar_mode = CONST;
+      }
+      fvar = top_scope_->DeclareFunctionVar(function_name, fvar_mode);
     }
 
-    // Determine if the function will be lazily compiled. The mode can only
-    // be PARSE_LAZILY if the --lazy flag is true.  We will not lazily
-    // compile if we do not have preparser data for the function.
+    // Determine whether the function will be lazily compiled.
+    // The heuristics are:
+    // - It must not have been prohibited by the caller of Parse (some
+    //   callers need a full AST).
+    // - The outer scope must be trivial (only global variables in scope).
+    // - The function mustn't be a function expression with an open parenthesis
+    //   before; we consider that a hint that the function will be called
+    //   immediately, and it would be a waste of time to make it lazily
+    //   compiled.
+    // These are all things we can know at this point, without looking at the
+    // function itself.
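+    // Illustrative: a top-level "var f = function() {...};" can have its
+    // body skipped, while "(function() {...})()" sets parenthesized_function_
+    // and is parsed eagerly.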
     bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
                                top_scope_->outer_scope()->is_global_scope() &&
                                top_scope_->HasTrivialOuterContext() &&
-                               !parenthesized_function_ &&
-                               pre_data() != NULL);
+                               !parenthesized_function_);
     parenthesized_function_ = false;  // The bit was set for this function only.
 
     if (is_lazily_compiled) {
       int function_block_pos = scanner().location().beg_pos;
-      FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
-      if (!entry.is_valid()) {
-        // There is no preparser data for the function, we will not lazily
-        // compile after all.
-        is_lazily_compiled = false;
-      } else {
-        end_pos = entry.end_pos();
-        if (end_pos <= function_block_pos) {
-          // End position greater than end of stream is safe, and hard to check.
-          ReportInvalidPreparseData(function_name, CHECK_OK);
+      FunctionEntry entry;
+      if (pre_data_ != NULL) {
+        // If we have pre_data_, we use it to skip parsing the function body.
+        // The preparser data contains the information we need to construct
+        // the lazy function.
+        entry = pre_data()->GetFunctionEntry(function_block_pos);
+        if (entry.is_valid()) {
+          if (entry.end_pos() <= function_block_pos) {
+            // End position greater than end of stream is safe, and hard
+            // to check.
+            ReportInvalidPreparseData(function_name, CHECK_OK);
+          }
+          scanner().SeekForward(entry.end_pos() - 1);
+
+          scope->set_end_position(entry.end_pos());
+          Expect(Token::RBRACE, CHECK_OK);
+          isolate()->counters()->total_preparse_skipped()->Increment(
+              scope->end_position() - function_block_pos);
+          materialized_literal_count = entry.literal_count();
+          expected_property_count = entry.property_count();
+          top_scope_->SetLanguageMode(entry.language_mode());
+          only_simple_this_property_assignments = false;
+          this_property_assignments = isolate()->factory()->empty_fixed_array();
+        } else {
+          is_lazily_compiled = false;
         }
+      } else {
+        // With no preparser data, we partially parse the function, without
+        // building an AST. This gathers the data needed to build a lazy
+        // function.
+        SingletonLogger logger;
+        preparser::PreParser::PreParseResult result =
+            LazyParseFunctionLiteral(&logger);
+        if (result == preparser::PreParser::kPreParseStackOverflow) {
+          // Propagate stack overflow.
+          stack_overflow_ = true;
+          *ok = false;
+          return NULL;
+        }
+        if (logger.has_error()) {
+          const char* arg = logger.argument_opt();
+          Vector<const char*> args;
+          if (arg != NULL) {
+            args = Vector<const char*>(&arg, 1);
+          }
+          ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
+                          logger.message(), args);
+          *ok = false;
+          return NULL;
+        }
+        scope->set_end_position(logger.end());
+        Expect(Token::RBRACE, CHECK_OK);
         isolate()->counters()->total_preparse_skipped()->Increment(
-            end_pos - function_block_pos);
-        // Seek to position just before terminal '}'.
-        scanner().SeekForward(end_pos - 1);
-        materialized_literal_count = entry.literal_count();
-        expected_property_count = entry.property_count();
-        if (entry.strict_mode()) top_scope_->EnableStrictMode();
+            scope->end_position() - function_block_pos);
+        materialized_literal_count = logger.literals();
+        expected_property_count = logger.properties();
+        top_scope_->SetLanguageMode(logger.language_mode());
         only_simple_this_property_assignments = false;
         this_property_assignments = isolate()->factory()->empty_fixed_array();
-        Expect(Token::RBRACE, CHECK_OK);
       }
     }
 
     if (!is_lazily_compiled) {
+      body = new(zone()) ZoneList<Statement*>(8);
+      if (fvar != NULL) {
+        VariableProxy* fproxy = top_scope_->NewUnresolved(function_name);
+        fproxy->BindTo(fvar);
+        body->Add(new(zone()) ExpressionStatement(
+            new(zone()) Assignment(isolate(),
+                                   fvar_init_op,
+                                   fproxy,
+                                   new(zone()) ThisFunction(isolate()),
+                                   RelocInfo::kNoPosition)));
+      }
       ParseSourceElements(body, Token::RBRACE, CHECK_OK);
 
-      materialized_literal_count = lexical_scope.materialized_literal_count();
-      expected_property_count = lexical_scope.expected_property_count();
+      materialized_literal_count = function_state.materialized_literal_count();
+      expected_property_count = function_state.expected_property_count();
+      handler_count = function_state.handler_count();
       only_simple_this_property_assignments =
-          lexical_scope.only_simple_this_property_assignments();
-      this_property_assignments = lexical_scope.this_property_assignments();
+          function_state.only_simple_this_property_assignments();
+      this_property_assignments = function_state.this_property_assignments();
 
       Expect(Token::RBRACE, CHECK_OK);
-      end_pos = scanner().location().end_pos;
+      scope->set_end_position(scanner().location().end_pos);
     }
 
     // Validate strict mode.
-    if (top_scope_->is_strict_mode()) {
+    if (!top_scope_->is_classic_mode()) {
       if (IsEvalOrArguments(function_name)) {
+        int start_pos = scope->start_position();
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3867,6 +4273,7 @@
         return NULL;
       }
       if (name_is_strict_reserved) {
+        int start_pos = scope->start_position();
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3882,11 +4289,13 @@
         *ok = false;
         return NULL;
       }
-      CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
+      CheckOctalLiteral(scope->start_position(),
+                        scope->end_position(),
+                        CHECK_OK);
     }
   }
 
-  if (harmony_block_scoping_) {
+  if (is_extended_mode()) {
     CheckConflictingVarDeclarations(scope, CHECK_OK);
   }
 
@@ -3897,11 +4306,10 @@
                                   body,
                                   materialized_literal_count,
                                   expected_property_count,
+                                  handler_count,
                                   only_simple_this_property_assignments,
                                   this_property_assignments,
                                   num_parameters,
-                                  start_pos,
-                                  end_pos,
                                   type,
                                   has_duplicate_parameters);
   function_literal->set_function_token_position(function_token_position);
@@ -3911,6 +4319,27 @@
 }
 
 
+preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+    SingletonLogger* logger) {
+  HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
+  ASSERT_EQ(Token::LBRACE, scanner().current_token());
+
+  if (reusable_preparser_ == NULL) {
+    intptr_t stack_limit = isolate()->stack_guard()->real_climit();
+    bool do_allow_lazy = true;
+    reusable_preparser_ = new preparser::PreParser(&scanner_,
+                                                   NULL,
+                                                   stack_limit,
+                                                   do_allow_lazy,
+                                                   allow_natives_syntax_);
+  }
+  preparser::PreParser::PreParseResult result =
+      reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+                                                logger);
+  return result;
+}
+
+
 Expression* Parser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
@@ -4026,7 +4455,7 @@
 // Parses an identifier that is valid for the current scope, in particular it
 // fails on strict mode future reserved keywords in a strict scope.
 Handle<String> Parser::ParseIdentifier(bool* ok) {
-  if (top_scope_->is_strict_mode()) {
+  if (!top_scope_->is_classic_mode()) {
     Expect(Token::IDENTIFIER, ok);
   } else if (!Check(Token::IDENTIFIER)) {
     Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
@@ -4069,7 +4498,7 @@
 void Parser::CheckStrictModeLValue(Expression* expression,
                                    const char* error,
                                    bool* ok) {
-  ASSERT(top_scope_->is_strict_mode());
+  ASSERT(!top_scope_->is_classic_mode());
   VariableProxy* lhs = expression != NULL
       ? expression->AsVariableProxy()
       : NULL;
@@ -5122,18 +5551,20 @@
 
 // Create a Scanner for the preparser to use as input, and preparse the source.
 static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
-                                  bool allow_lazy,
-                                  ParserRecorder* recorder,
-                                  bool harmony_block_scoping) {
+                                  int flags,
+                                  ParserRecorder* recorder) {
   Isolate* isolate = Isolate::Current();
-  JavaScriptScanner scanner(isolate->unicode_cache());
-  scanner.SetHarmonyBlockScoping(harmony_block_scoping);
+  HistogramTimerScope timer(isolate->counters()->pre_parse());
+  Scanner scanner(isolate->unicode_cache());
+  scanner.SetHarmonyScoping(FLAG_harmony_scoping);
   scanner.Initialize(source);
   intptr_t stack_limit = isolate->stack_guard()->real_climit();
-  if (!preparser::PreParser::PreParseProgram(&scanner,
-                                             recorder,
-                                             allow_lazy,
-                                             stack_limit)) {
+  preparser::PreParser::PreParseResult result =
+      preparser::PreParser::PreParseProgram(&scanner,
+                                            recorder,
+                                            flags,
+                                            stack_limit);
+  if (result == preparser::PreParser::kPreParseStackOverflow) {
     isolate->StackOverflow();
     return NULL;
   }
@@ -5147,27 +5578,38 @@
 
 // Preparse, but only collect data that is immediately useful,
 // even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
+ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
                                            v8::Extension* extension,
-                                           bool harmony_block_scoping) {
+                                           int flags) {
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   if (!allow_lazy) {
     // Partial preparsing is only about lazily compiled functions.
     // If we don't allow lazy compilation, the log data will be empty.
     return NULL;
   }
+  flags |= kAllowLazy;
   PartialParserRecorder recorder;
-  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
+  int source_length = source->length();
+  if (source->IsExternalTwoByteString()) {
+    ExternalTwoByteStringUC16CharacterStream stream(
+        Handle<ExternalTwoByteString>::cast(source), 0, source_length);
+    return DoPreParse(&stream, flags, &recorder);
+  } else {
+    GenericStringUC16CharacterStream stream(source, 0, source_length);
+    return DoPreParse(&stream, flags, &recorder);
+  }
 }
 
 
 ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
                                     v8::Extension* extension,
-                                    bool harmony_block_scoping) {
+                                    int flags) {
   Handle<Script> no_script;
-  bool allow_lazy = FLAG_lazy && (extension == NULL);
+  if (FLAG_lazy && (extension == NULL)) {
+    flags |= kAllowLazy;
+  }
   CompleteParserRecorder recorder;
-  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
+  return DoPreParse(source, flags, &recorder);
 }
 
 
@@ -5193,29 +5635,26 @@
 }
 
 
-bool ParserApi::Parse(CompilationInfo* info) {
+bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
   ASSERT(info->function() == NULL);
   FunctionLiteral* result = NULL;
   Handle<Script> script = info->script();
-  bool harmony_block_scoping = !info->is_native() &&
-                               FLAG_harmony_block_scoping;
+  ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
+  if (!info->is_native() && FLAG_harmony_scoping) {
+    // Harmony scoping is requested.
+    parsing_flags |= EXTENDED_MODE;
+  }
+  if (FLAG_allow_natives_syntax || info->is_native()) {
+    // We require %identifier(..) syntax.
+    parsing_flags |= kAllowNativesSyntax;
+  }
   if (info->is_lazy()) {
-    bool allow_natives_syntax =
-        FLAG_allow_natives_syntax ||
-        info->is_native();
-    Parser parser(script, allow_natives_syntax, NULL, NULL);
-    parser.SetHarmonyBlockScoping(harmony_block_scoping);
+    ASSERT(!info->is_eval());
+    Parser parser(script, parsing_flags, NULL, NULL);
     result = parser.ParseLazy(info);
   } else {
-    // Whether we allow %identifier(..) syntax.
-    bool allow_natives_syntax =
-        info->is_native() || FLAG_allow_natives_syntax;
     ScriptDataImpl* pre_data = info->pre_parse_data();
-    Parser parser(script,
-                  allow_natives_syntax,
-                  info->extension(),
-                  pre_data);
-    parser.SetHarmonyBlockScoping(harmony_block_scoping);
+    Parser parser(script, parsing_flags, info->extension(), pre_data);
     if (pre_data != NULL && pre_data->has_error()) {
       Scanner::Location loc = pre_data->MessageLocation();
       const char* message = pre_data->BuildMessage();
@@ -5228,10 +5667,7 @@
       DeleteArray(args.start());
       ASSERT(info->isolate()->has_pending_exception());
     } else {
-      Handle<String> source = Handle<String>(String::cast(script->source()));
-      result = parser.ParseProgram(source,
-                                   info->is_global(),
-                                   info->StrictMode());
+      result = parser.ParseProgram(info);
     }
   }
   info->SetFunction(result);
diff --git a/src/parser.h b/src/parser.h
index 3312f2f..75f8e10 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -33,6 +33,7 @@
 #include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "scopes.h"
+#include "preparser.h"
 
 namespace v8 {
 namespace internal {
@@ -42,7 +43,6 @@
 class ParserLog;
 class PositionStack;
 class Target;
-class LexicalScope;
 
 template <typename T> class ZoneListWrapper;
 
@@ -67,26 +67,36 @@
 
 class FunctionEntry BASE_EMBEDDED {
  public:
-  explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
-  FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
+  enum {
+    kStartPositionIndex,
+    kEndPositionIndex,
+    kLiteralCountIndex,
+    kPropertyCountIndex,
+    kLanguageModeIndex,
+    kSize
+  };
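+  // Each function entry occupies kSize consecutive unsigned values in the
+  // preparse data; the enumerators above are the offsets within that run.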
 
-  int start_pos() { return backing_[kStartPosOffset]; }
-  int end_pos() { return backing_[kEndPosOffset]; }
-  int literal_count() { return backing_[kLiteralCountOffset]; }
-  int property_count() { return backing_[kPropertyCountOffset]; }
-  bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
+  explicit FunctionEntry(Vector<unsigned> backing)
+    : backing_(backing) { }
 
-  bool is_valid() { return backing_.length() > 0; }
+  FunctionEntry() : backing_() { }
 
-  static const int kSize = 5;
+  int start_pos() { return backing_[kStartPositionIndex]; }
+  int end_pos() { return backing_[kEndPositionIndex]; }
+  int literal_count() { return backing_[kLiteralCountIndex]; }
+  int property_count() { return backing_[kPropertyCountIndex]; }
+  LanguageMode language_mode() {
+    ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
+           backing_[kLanguageModeIndex] == STRICT_MODE ||
+           backing_[kLanguageModeIndex] == EXTENDED_MODE);
+    return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+  }
+
+  bool is_valid() { return !backing_.is_empty(); }
 
  private:
   Vector<unsigned> backing_;
-  static const int kStartPosOffset = 0;
-  static const int kEndPosOffset = 1;
-  static const int kLiteralCountOffset = 2;
-  static const int kPropertyCountOffset = 3;
-  static const int kStrictModeOffset = 4;
+  bool owns_data_;
 };
 
 
@@ -98,7 +108,7 @@
 
   // Create an empty ScriptDataImpl that is guaranteed to not satisfy
   // a SanityCheck.
-  ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
+  ScriptDataImpl() : owns_store_(false) { }
 
   virtual ~ScriptDataImpl();
   virtual int Length();
@@ -159,18 +169,18 @@
   // Parses the source code represented by the compilation info and sets its
   // function literal.  Returns false (and deallocates any allocated AST
   // nodes) if parsing failed.
-  static bool Parse(CompilationInfo* info);
+  static bool Parse(CompilationInfo* info, int flags);
 
   // Generic preparser generating full preparse data.
   static ScriptDataImpl* PreParse(UC16CharacterStream* source,
                                   v8::Extension* extension,
-                                  bool harmony_block_scoping);
+                                  int flags);
 
   // Preparser that only does preprocessing that makes sense if only used
   // immediately after.
-  static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
+  static ScriptDataImpl* PartialPreParse(Handle<String> source,
                                          v8::Extension* extension,
-                                         bool harmony_block_scoping);
+                                         int flags);
 };
 
 // ----------------------------------------------------------------------------
@@ -415,19 +425,23 @@
 // ----------------------------------------------------------------------------
 // JAVASCRIPT PARSING
 
+// Forward declaration.
+class SingletonLogger;
+
 class Parser {
  public:
   Parser(Handle<Script> script,
-         bool allow_natives_syntax,
+         int parsing_flags,  // Combination of ParsingFlags
          v8::Extension* extension,
          ScriptDataImpl* pre_data);
-  virtual ~Parser() { }
+  virtual ~Parser() {
+    if (reusable_preparser_ != NULL) {
+      delete reusable_preparser_;
+    }
+  }
 
   // Returns NULL if parsing failed.
-  FunctionLiteral* ParseProgram(Handle<String> source,
-                                bool in_global_context,
-                                StrictModeFlag strict_mode);
-
+  FunctionLiteral* ParseProgram(CompilationInfo* info);
   FunctionLiteral* ParseLazy(CompilationInfo* info);
 
   void ReportMessageAt(Scanner::Location loc,
@@ -436,7 +450,6 @@
   void ReportMessageAt(Scanner::Location loc,
                        const char* message,
                        Vector<Handle<String> > args);
-  void SetHarmonyBlockScoping(bool block_scoping);
 
  private:
   // Limit on number of function parameters is chosen arbitrarily.
@@ -445,9 +458,7 @@
   // should be checked.
   static const int kMaxNumFunctionParameters = 32766;
   static const int kMaxNumFunctionLocals = 32767;
-  FunctionLiteral* ParseLazy(CompilationInfo* info,
-                             UC16CharacterStream* source,
-                             ZoneScope* zone_scope);
+
   enum Mode {
     PARSE_LAZILY,
     PARSE_EAGERLY
@@ -459,13 +470,25 @@
     kForStatement
   };
 
+  // Whether a list of variable declarations includes any initializers.
+  enum VariableDeclarationProperties {
+    kHasInitializers,
+    kHasNoInitializers
+  };
+
+  class BlockState;
+  class FunctionState;
+
+  FunctionLiteral* ParseLazy(CompilationInfo* info,
+                             UC16CharacterStream* source,
+                             ZoneScope* zone_scope);
+
   Isolate* isolate() { return isolate_; }
   Zone* zone() { return isolate_->zone(); }
 
   // Called by ParseProgram after setting up the scanner.
-  FunctionLiteral* DoParseProgram(Handle<String> source,
-                                  bool in_global_context,
-                                  StrictModeFlag strict_mode,
+  FunctionLiteral* DoParseProgram(CompilationInfo* info,
+                                  Handle<String> source,
                                   ZoneScope* zone_scope);
 
   // Report syntax error
@@ -473,10 +496,14 @@
   void ReportInvalidPreparseData(Handle<String> name, bool* ok);
   void ReportMessage(const char* message, Vector<const char*> args);
 
-  bool inside_with() const { return with_nesting_level_ > 0; }
-  JavaScriptScanner& scanner()  { return scanner_; }
+  bool inside_with() const { return top_scope_->inside_with(); }
+  Scanner& scanner()  { return scanner_; }
   Mode mode() const { return mode_; }
   ScriptDataImpl* pre_data() const { return pre_data_; }
+  bool is_extended_mode() {
+    ASSERT(top_scope_ != NULL);
+    return top_scope_->is_extended_mode();
+  }
 
   // Check if the given string is 'eval' or 'arguments'.
   bool IsEvalOrArguments(Handle<String> string);
@@ -492,10 +519,10 @@
   Statement* ParseFunctionDeclaration(bool* ok);
   Statement* ParseNativeDeclaration(bool* ok);
   Block* ParseBlock(ZoneStringList* labels, bool* ok);
-  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
   Block* ParseVariableStatement(VariableDeclarationContext var_context,
                                 bool* ok);
   Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                   VariableDeclarationProperties* decl_props,
                                    Handle<String>* out,
                                    bool* ok);
   Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
@@ -515,6 +542,9 @@
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
 
+  // Support for harmony block-scoped bindings.
+  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
+
   Expression* ParseExpression(bool accept_IN, bool* ok);
   Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
   Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
@@ -533,11 +563,6 @@
   ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
   Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
 
-  Expression* NewCompareNode(Token::Value op,
-                             Expression* x,
-                             Expression* y,
-                             int position);
-
   // Populate the constant properties fixed array for a materialized object
   // literal.
   void BuildObjectLiteralConstantProperties(
@@ -656,7 +681,7 @@
   void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
 
   // Parser support
-  VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+  VariableProxy* Declare(Handle<String> name, VariableMode mode,
                          FunctionLiteral* fun,
                          bool resolve,
                          bool* ok);
@@ -670,11 +695,12 @@
   // Factory methods.
 
   Statement* EmptyStatement() {
-    static v8::internal::EmptyStatement empty;
-    return &empty;
+    static v8::internal::EmptyStatement* empty =
+        ::new v8::internal::EmptyStatement();
+    return empty;
   }
 
-  Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
+  Scope* NewScope(Scope* parent, ScopeType type);
 
   Handle<String> LookupSymbol(int symbol_id);
 
@@ -712,33 +738,34 @@
                             Handle<String> type,
                             Vector< Handle<Object> > arguments);
 
+  preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
+       SingletonLogger* logger);
+
   Isolate* isolate_;
   ZoneList<Handle<String> > symbol_cache_;
 
   Handle<Script> script_;
-  JavaScriptScanner scanner_;
-
+  Scanner scanner_;
+  preparser::PreParser* reusable_preparser_;
   Scope* top_scope_;
-  int with_nesting_level_;
-
-  LexicalScope* lexical_scope_;
-  Mode mode_;
-
+  FunctionState* current_function_state_;
   Target* target_stack_;  // for break, continue statements
-  bool allow_natives_syntax_;
   v8::Extension* extension_;
-  bool is_pre_parsing_;
   ScriptDataImpl* pre_data_;
   FuncNameInferrer* fni_;
+
+  Mode mode_;
+  bool allow_natives_syntax_;
+  bool allow_lazy_;
   bool stack_overflow_;
   // If true, the next (and immediately following) function literal is
   // preceded by a parenthesis.
   // Heuristically that means that the function will be called immediately,
   // so never lazily compile it.
   bool parenthesized_function_;
-  bool harmony_block_scoping_;
 
-  friend class LexicalScope;
+  friend class BlockState;
+  friend class FunctionState;
 };
 
 
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index a72f5da..942e764 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -355,6 +355,17 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() : thread_(kNoThread) {}
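
A hedged note on the Guard implementation above: on Windows, committing a
page with PAGE_READONLY | PAGE_GUARD arms a one-shot tripwire whose first
access raises STATUS_GUARD_PAGE_VIOLATION, after which the system clears
the guard bit. A minimal standalone sketch of the same call pattern:

    #include <windows.h>

    // Arms a guard page over an already reserved page; the first touch
    // faults exactly once, which is what a stack-limit tripwire wants.
    static bool ArmGuardPage(void* page) {
      SYSTEM_INFO info;
      GetSystemInfo(&info);
      return VirtualAlloc(page, info.dwPageSize, MEM_COMMIT,
                          PAGE_READONLY | PAGE_GUARD) != NULL;
    }
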
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 685ec3c..61fc1b5 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -333,44 +333,132 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
 
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
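
The aligned constructor above is easiest to check with concrete numbers.
A worked example, assuming 4 KB pages and a request for a 1 MB block
aligned to 1 MB (the base address is illustrative):

    // base         = 0x7f3a253000                 (what mmap handed back)
    // request_size = RoundUp(1MB + 1MB, 4KB)      = 0x200000
    // aligned_base = RoundUp(base, 1MB)           = 0x7f3a300000
    // prefix_size  = aligned_base - base          = 0x0ad000  (munmap'd)
    // request_size -= prefix_size                 = 0x153000
    // aligned_size = RoundUp(1MB, 4KB)            = 0x100000
    // suffix_size  = request_size - aligned_size  = 0x053000  (munmap'd)
    // What remains is one naturally aligned 1 MB PROT_NONE reservation at
    // 0x7f3a300000, and ASSERT(aligned_size == request_size) holds.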
 
 
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 032cdaa..408e0c0 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -78,30 +78,6 @@
 static Mutex* limit_mutex = NULL;
 
 
-static void* GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
-    uint64_t rnd1 = V8::RandomPrivate(isolate);
-    uint64_t rnd2 = V8::RandomPrivate(isolate);
-    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    uint32_t raw_addr = V8::RandomPrivate(isolate);
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
-    raw_addr &= 0x3ffff000;
-    raw_addr += 0x20000000;
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
 void OS::Setup() {
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = Ticks() ^ (getpid() << 16);
@@ -381,9 +357,9 @@
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = GetRandomMmapAddr();
+  void* addr = OS::GetRandomMmapAddr();
   void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (mbase == MAP_FAILED) {
     LOG(i::Isolate::Current(),
@@ -453,7 +429,12 @@
   int size = ftell(file);
 
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -468,13 +449,18 @@
     return NULL;
   }
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -553,10 +539,14 @@
   // kernel log.
   int size = sysconf(_SC_PAGESIZE);
   FILE* f = fopen(kGCFakeMmap, "w+");
-  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
-                    fileno(f), 0);
+  void* addr = mmap(OS::GetRandomMmapAddr(),
+                    size,
+                    PROT_READ | PROT_EXEC,
+                    MAP_PRIVATE,
+                    fileno(f),
+                    0);
   ASSERT(addr != MAP_FAILED);
-  munmap(addr, size);
+  OS::Free(addr, size);
   fclose(f);
 }
 
@@ -598,44 +588,132 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
@@ -696,7 +774,8 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  CHECK_EQ(0, result);
   ASSERT(data_->thread_ != kNoThread);
 }
 
@@ -855,7 +934,7 @@
 }
 
 
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__) || defined(__i386__))
+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
 // Android runs a fairly new Linux kernel, so signal info is there,
 // but the C library doesn't have the structs defined.
 
@@ -877,7 +956,38 @@
   __sigset_t uc_sigmask;
 } ucontext_t;
 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-enum X86Registers {REG_EIP = 14, REG_ESP = 7, REG_EBP = 6};
+
+#elif !defined(__GLIBC__) && defined(__mips__)
+// MIPS version of sigcontext, for Android bionic.
+struct sigcontext {
+  uint32_t regmask;
+  uint32_t status;
+  uint64_t pc;
+  uint64_t gregs[32];
+  uint64_t fpregs[32];
+  uint32_t acx;
+  uint32_t fpc_csr;
+  uint32_t fpc_eir;
+  uint32_t used_math;
+  uint32_t dsp;
+  uint64_t mdhi;
+  uint64_t mdlo;
+  uint32_t hi1;
+  uint32_t lo1;
+  uint32_t hi2;
+  uint32_t lo2;
+  uint32_t hi3;
+  uint32_t lo3;
+};
+typedef uint32_t __sigset_t;
+typedef struct sigcontext mcontext_t;
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext* uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  __sigset_t uc_sigmask;
+} ucontext_t;
 
 #endif
 
@@ -893,7 +1003,6 @@
 
 
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-#ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
   Isolate* isolate = Isolate::UncheckedCurrent();
@@ -935,15 +1044,14 @@
   sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
   sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
   sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif
+#endif  // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
 #elif V8_HOST_ARCH_MIPS
-  sample.pc = reinterpret_cast<Address>(mcontext.pc);
-  sample.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
-  sample.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif
+  sample->pc = reinterpret_cast<Address>(mcontext.pc);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif  // V8_HOST_ARCH_*
   sampler->SampleStack(sample);
   sampler->Tick(sample);
-#endif
 }
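
On the POSIX ports, VirtualMemory::Guard above defers to OS::Guard. A
plausible sketch of that helper, assuming it is the usual
mprotect(PROT_NONE) trick (the actual definition lives in
platform-posix.cc):

    #include <sys/mman.h>

    // Strips all permissions from the page so any access faults; the
    // fault is what turns a runaway stack into a catchable event.
    static void GuardPage(void* address, size_t page_size) {
      mprotect(address, page_size, PROT_NONE);
    }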
 
 
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 6be941a..0f9b958 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -94,12 +94,8 @@
 
 
 void OS::Setup() {
-  // Seed the random number generator.
-  // Convert the current time to a 64-bit integer first, before converting it
-  // to an unsigned. Going directly will cause an overflow and the seed to be
-  // set to all ones. The seed will be identical for different instances that
-  // call this setup code within the same millisecond.
-  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  // Seed the random number generator. We preserve microsecond resolution.
+  uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
 }
@@ -148,9 +144,12 @@
                    bool is_executable) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot,
+  void* mbase = mmap(OS::GetRandomMmapAddr(),
+                     msize,
+                     prot,
                      MAP_PRIVATE | MAP_ANON,
-                     kMmapFd, kMmapFdOffset);
+                     kMmapFd,
+                     kMmapFdOffset);
   if (mbase == MAP_FAILED) {
     LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
@@ -207,7 +206,12 @@
   int size = ftell(file);
 
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -222,13 +226,18 @@
     return NULL;
   }
   void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+      mmap(OS::GetRandomMmapAddr(),
+          size,
+          PROT_READ | PROT_WRITE,
+          MAP_SHARED,
+          fileno(file),
+          0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -334,33 +343,108 @@
 }
 
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
-  size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
+  if (MAP_FAILED == mmap(address,
+                         size,
+                         prot,
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
+                         kMmapFd,
+                         kMmapFdOffset)) {
     return false;
   }
 
@@ -370,9 +454,22 @@
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  return mmap(address,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
 }
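
Taken together, the ports expose one two-phase discipline: reserve
address space with no access rights, commit pages on demand, and uncommit
without losing the reservation. A usage sketch against the patched class,
with illustrative sizes:

    // Reserve 16 MB of address space; nothing is accessible yet.
    VirtualMemory vm(16 * 1024 * 1024);
    if (vm.IsReserved()) {
      void* first_page = vm.address();
      vm.Commit(first_page, 4096, false);  // now readable and writable
      // ... use the page ...
      vm.Uncommit(first_page, 4096);       // back to an inaccessible state
    }
    // ~VirtualMemory releases the whole region via ReleaseRegion().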
 
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 8c2a863..a59a926 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -295,6 +295,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() {
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 973329b..f044a6e 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,79 +33,99 @@
 #include <signal.h>
 #include <sys/time.h>
 #include <sys/resource.h>
+#include <sys/syscall.h>
 #include <sys/types.h>
 #include <stdlib.h>
 
 #include <sys/types.h>  // mmap & munmap
 #include <sys/mman.h>   // mmap & munmap
 #include <sys/stat.h>   // open
-#include <sys/fcntl.h>  // open
-#include <unistd.h>     // getpagesize
+#include <fcntl.h>      // open
+#include <unistd.h>     // sysconf
 #include <execinfo.h>   // backtrace, backtrace_symbols
 #include <strings.h>    // index
 #include <errno.h>
 #include <stdarg.h>
-#include <limits.h>
 
 #undef MAP_TYPE
 
 #include "v8.h"
-#include "v8threads.h"
 
 #include "platform.h"
+#include "v8threads.h"
 #include "vm-state-inl.h"
 
 
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on OpenBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
+// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
 static const pthread_t kNoThread = (pthread_t) 0;
 
 
 double ceiling(double x) {
-    // Correct as on OS X
-    if (-1.0 < x && x < 0.0) {
-        return -0.0;
-    } else {
-        return ceil(x);
-    }
+  return ceil(x);
 }
 
 
 static Mutex* limit_mutex = NULL;
 
 
+static void* GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
+
+
 void OS::Setup() {
-  // Seed the random number generator.
-  // Convert the current time to a 64-bit integer first, before converting it
-  // to an unsigned. Going directly can cause an overflow and the seed to be
-  // set to all ones. The seed will be identical for different instances that
-  // call this setup code within the same millisecond.
-  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  // Seed the random number generator. We preserve microsecond resolution.
+  uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  *ptr = value;
-}
-
-
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
-  return 0;  // OpenBSD runs on anything.
+  return 0;
 }
 
 
 int OS::ActivationFrameAlignment() {
-  // 16 byte alignment on OpenBSD
+  // With gcc 4.4 the tree vectorization optimizer can generate code
+  // that requires 16 byte alignment such as movdqa on x86.
   return 16;
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  __asm__ __volatile__("" : : : "memory");
+  // An x86 store acts as a release barrier.
+  *ptr = value;
+}
+
+
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -150,19 +170,20 @@
 
 
 size_t OS::AllocateAlignment() {
-  return getpagesize();
+  return sysconf(_SC_PAGESIZE);
 }
 
 
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
-                   bool executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
   if (mbase == MAP_FAILED) {
-    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+    LOG(i::Isolate::Current(),
+        StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
   }
   *allocated = msize;
@@ -171,9 +192,9 @@
 }
 
 
-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
   // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(buf, length);
+  int result = munmap(address, size);
   USE(result);
   ASSERT(result == 0);
 }
@@ -192,13 +213,7 @@
 
 
 void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
-  asm("bkpt 0");
-# endif
-#else
   asm("int $3");
-#endif
 }
 
 
@@ -245,61 +260,95 @@
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
 
-static unsigned StringToLong(char* buffer) {
-  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
-}
-
-
 void OS::LogSharedLibraryAddresses() {
-  static const int MAP_LENGTH = 1024;
-  int fd = open("/proc/self/maps", O_RDONLY);
-  if (fd < 0) return;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  i::Isolate* isolate = ISOLATE;
+  // This loop will terminate once the scanning hits an EOF.
   while (true) {
-    char addr_buffer[11];
-    addr_buffer[0] = '0';
-    addr_buffer[1] = 'x';
-    addr_buffer[10] = 0;
-    int result = read(fd, addr_buffer + 2, 8);
-    if (result < 8) break;
-    unsigned start = StringToLong(addr_buffer);
-    result = read(fd, addr_buffer + 2, 1);
-    if (result < 1) break;
-    if (addr_buffer[2] != '-') break;
-    result = read(fd, addr_buffer + 2, 8);
-    if (result < 8) break;
-    unsigned end = StringToLong(addr_buffer);
-    char buffer[MAP_LENGTH];
-    int bytes_read = -1;
-    do {
-      bytes_read++;
-      if (bytes_read >= MAP_LENGTH - 1)
-        break;
-      result = read(fd, buffer + bytes_read, 1);
-      if (result < 1) break;
-    } while (buffer[bytes_read] != '\n');
-    buffer[bytes_read] = 0;
-    // Ignore mappings that are not executable.
-    if (buffer[3] != 'x') continue;
-    char* start_of_path = index(buffer, '/');
-    // There may be no filename in this line.  Skip to next.
-    if (start_of_path == NULL) continue;
-    buffer[bytes_read] = 0;
-    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF: Was unexpected, just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to the end of the line
+      // to set up reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
   }
-  close(fd);
+  free(lib_name);
+  fclose(fp);
 }
 
 
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
 void OS::SignalCodeMovingGC() {
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(kGCFakeMmap, "w+");
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
+  ASSERT(addr != MAP_FAILED);
+  OS::Free(addr, size);
+  fclose(f);
 }
 
 
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  // backtrace is a glibc extension.
   int frames_size = frames.length();
   ScopedVector<void*> addresses(frames_size);
 
@@ -331,62 +380,151 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
 
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
 class Thread::PlatformData : public Malloced {
  public:
+  PlatformData() : thread_(kNoThread) {}
+
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
 Thread::Thread(const Options& options)
-    : data_(new PlatformData),
+    : data_(new PlatformData()),
       stack_size_(options.stack_size) {
   set_name(options.name);
 }
 
 
 Thread::Thread(const char* name)
-    : data_(new PlatformData),
+    : data_(new PlatformData()),
       stack_size_(0) {
   set_name(name);
 }
@@ -402,6 +540,11 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
+#ifdef PR_SET_NAME
+  prctl(PR_SET_NAME,
+        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
+        0, 0, 0);
+#endif
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
   thread->Run();
@@ -477,6 +620,7 @@
     ASSERT(result == 0);
     result = pthread_mutex_init(&mutex_, &attrs);
     ASSERT(result == 0);
+    USE(result);
   }
 
   virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
@@ -533,6 +677,14 @@
 }
 
 
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
+    (ts)->tv_sec = (tv)->tv_sec;                                    \
+    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
+} while (false)
+#endif
+
+
 bool OpenBSDSemaphore::Wait(int timeout) {
   const long kOneSecondMicros = 1000000;  // NOLINT
 
@@ -566,29 +718,15 @@
   }
 }
 
-
 Semaphore* OS::CreateSemaphore(int count) {
   return new OpenBSDSemaphore(count);
 }
 
 
 static pthread_t GetThreadID() {
-  pthread_t thread_id = pthread_self();
-  return thread_id;
+  return pthread_self();
 }
 
-
-class Sampler::PlatformData : public Malloced {
- public:
-  PlatformData() : vm_tid_(GetThreadID()) {}
-
-  pthread_t vm_tid() const { return vm_tid_; }
-
- private:
-  pthread_t vm_tid_;
-};
-
-
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
@@ -620,16 +758,23 @@
   sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
   sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
   sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#elif V8_HOST_ARCH_ARM
-  sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
-  sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
-  sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
 #endif
   sampler->SampleStack(sample);
   sampler->Tick(sample);
 }
 
 
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() : vm_tid_(GetThreadID()) {}
+
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+
 class SignalSender : public Thread {
  public:
   enum SleepInterval {
@@ -639,21 +784,31 @@
 
   explicit SignalSender(int interval)
       : Thread("SignalSender"),
+        vm_tgid_(getpid()),
         interval_(interval) {}
 
+  static void InstallSignalHandler() {
+    struct sigaction sa;
+    sa.sa_sigaction = ProfilerSignalHandler;
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = SA_RESTART | SA_SIGINFO;
+    signal_handler_installed_ =
+        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+  }
+
+  static void RestoreSignalHandler() {
+    if (signal_handler_installed_) {
+      sigaction(SIGPROF, &old_signal_handler_, 0);
+      signal_handler_installed_ = false;
+    }
+  }
+
   static void AddActiveSampler(Sampler* sampler) {
     ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
-      // Install a signal handler.
-      struct sigaction sa;
-      sa.sa_sigaction = ProfilerSignalHandler;
-      sigemptyset(&sa.sa_mask);
-      sa.sa_flags = SA_RESTART | SA_SIGINFO;
-      signal_handler_installed_ =
-          (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
-      // Start a thread that sends SIGPROF signal to VM threads.
+      // Start a thread that will send SIGPROF signals to VM threads
+      // when CPU profiling is enabled.
       instance_ = new SignalSender(sampler->interval());
       instance_->Start();
     } else {
@@ -668,12 +823,7 @@
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
-
-      // Restore the old signal handler.
-      if (signal_handler_installed_) {
-        sigaction(SIGPROF, &old_signal_handler_, 0);
-        signal_handler_installed_ = false;
-      }
+      RestoreSignalHandler();
     }
   }
 
@@ -685,6 +835,11 @@
       bool cpu_profiling_enabled =
           (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
       bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      if (cpu_profiling_enabled && !signal_handler_installed_) {
+        InstallSignalHandler();
+      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+        RestoreSignalHandler();
+      }
       // When CPU profiling is enabled both JavaScript and C++ code is
       // profiled. We must not suspend.
       if (!cpu_profiling_enabled) {
@@ -751,6 +906,7 @@
     USE(result);
   }
 
+  const int vm_tgid_;
   const int interval_;
   RuntimeProfilerRateLimiter rate_limiter_;
 
@@ -763,6 +919,7 @@
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
+
 Mutex* SignalSender::mutex_ = OS::CreateMutex();
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
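
For reference, the rewritten LogSharedLibraryAddresses above consumes
/proc/self/maps entries of this shape (an illustrative line):

    // b742d000-b7569000 r-xp 00000000 08:01 393320   /lib/libc-2.11.so
    //
    // The two fscanf calls pick off "b742d000-b7569000" and "r-xp"; only
    // readable, executable, non-writable entries are logged, with the
    // trailing path recovered by fgets or, failing that, replaced by the
    // raw start-end range via snprintf.
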
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 52cf029..08417ff 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -46,9 +46,9 @@
 
 #undef MAP_TYPE
 
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
 #define LOG_TAG "v8"
-#include <utils/Log.h>  // LOG_PRI_VA
+#include <android/log.h>
 #endif
 
 #include "v8.h"
@@ -70,6 +70,12 @@
 }
 
 
+intptr_t OS::CommitPageSize() {
+  static intptr_t page_size = getpagesize();
+  return page_size;
+}
+
+
 #ifndef __CYGWIN__
 // Get rid of writable permission on code allocations.
 void OS::ProtectCode(void* address, const size_t size) {
@@ -84,6 +90,34 @@
 #endif  // __CYGWIN__
 
 
+void* OS::GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
+    // 10.6 and 10.7.
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
+
+
 // ----------------------------------------------------------------------------
 // Math functions
 
@@ -182,7 +216,7 @@
 
 void OS::VPrint(const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vprintf(format, args);
 #endif
@@ -199,7 +233,7 @@
 
 void OS::VFPrint(FILE* out, const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vfprintf(out, format, args);
 #endif
@@ -216,7 +250,7 @@
 
 void OS::VPrintError(const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
 #else
   vfprintf(stderr, format, args);
 #endif
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 035d394..ca6443b 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -342,11 +342,34 @@
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
     return false;
   }
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 97788e2..8bbdcb2 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -831,43 +831,67 @@
 }
 
 
+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
+static void* GetRandomAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+    // The address range used to randomize RWX allocations in OS::Allocate.
+    // Try not to map pages into the default range that Windows loads DLLs
+    // into. Use a multiple of 64k to prevent committing unused memory.
+    // Note: this does not guarantee RWX regions will be within the
+    // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+    uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
+        | kAllocationRandomAddressMin;
+    address &= kAllocationRandomAddressMax;
+    return reinterpret_cast<void *>(address);
+  }
+  return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts, give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
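
A minimal sketch of the hint-and-retry pattern in isolation; kMinAddr, kMaxAddr and the std::rand()-based hint are invented stand-ins for kAllocationRandomAddressMin/Max and V8::RandomPrivate:

  #include <windows.h>
  #include <cstdint>
  #include <cstdlib>

  static void* AllocWithRandomHint(size_t size, DWORD action, DWORD protect) {
    const uintptr_t kMinAddr = 0x04000000;  // assumed 32-bit range
    const uintptr_t kMaxAddr = 0x3FFF0000;
    void* base = NULL;
    for (int attempts = 0; base == NULL && attempts < 3; ++attempts) {
      // Build a 64KB-granular hint inside [kMinAddr, kMaxAddr].
      uintptr_t hint = (static_cast<uintptr_t>(std::rand()) << 16) | kMinAddr;
      hint &= kMaxAddr;
      base = VirtualAlloc(reinterpret_cast<void*>(hint), size, action, protect);
    }
    // After three failed hints, let the OS choose the address.
    if (base == NULL) base = VirtualAlloc(NULL, size, action, protect);
    return base;
  }

Falling back to a NULL hint keeps allocation reliable even when every randomized address happens to be occupied.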
+
+
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  // The address range used to randomize RWX allocations in OS::Allocate
-  // Try not to map pages into the default range that windows loads DLLs
-  // Use a multiple of 64k to prevent committing unused memory.
-  // Note: This does not guarantee RWX regions will be within the
-  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
-  static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
-  static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-
   // VirtualAlloc rounds allocated size to page size automatically.
   size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-  intptr_t address = 0;
 
   // Windows XP SP2 allows Data Execution Prevention (DEP).
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
 
-  // For exectutable pages try and randomize the allocation address
-  if (prot == PAGE_EXECUTE_READWRITE &&
-      msize >= static_cast<size_t>(Page::kPageSize)) {
-    address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
-      | kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-  }
-
-  LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address),
-                              msize,
-                              MEM_COMMIT | MEM_RESERVE,
-                              prot);
-  if (mbase == NULL && address != 0)
-    mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
+  LPVOID mbase = RandomizedVirtualAlloc(msize,
+                                        MEM_COMMIT | MEM_RESERVE,
+                                        prot);
 
   if (mbase == NULL) {
     LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
@@ -1397,38 +1421,108 @@
 }
 
 
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
-  size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  Address base = RoundUp(static_cast<Address>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  ASSERT(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    ASSERT(base == static_cast<Address>(address));
+  } else {
+    // Resizing failed; just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
 }
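
The constructor above over-reserves by the alignment, rounds the start address up, releases the whole oversized block, and immediately re-reserves exactly the aligned range; if another thread wins the race for that range, it settles for the larger unaligned reservation. A small sketch of the rounding step, with invented numbers:

  #include <cstdint>
  #include <cstdio>

  // Rounds x up to the next multiple of a power-of-two alignment, as the
  // constructor does when deriving the aligned base.
  static uintptr_t RoundUpTo(uintptr_t x, uintptr_t alignment) {
    return (x + alignment - 1) & ~(alignment - 1);
  }

  int main() {
    uintptr_t address = 0x0233F000;  // pretend ReserveRegion returned this
    uintptr_t alignment = 0x100000;  // a 1MB-aligned area was requested
    uintptr_t base = RoundUpTo(address, alignment);
    std::printf("base = %#llx\n",
                static_cast<unsigned long long>(base));  // 0x2400000
    return 0;
  }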
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+    bool result = ReleaseRegion(address_, size_);
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
-    return false;
-  }
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
 
-  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
-  return true;
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  if (CommitRegion(address, size, is_executable)) {
+    UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
+    return true;
+  }
+  return false;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   ASSERT(IsReserved());
-  return VirtualFree(address, size, MEM_DECOMMIT) != false;
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
+  return true;
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
 }
 
 
@@ -1453,6 +1547,7 @@
  public:
   explicit PlatformData(HANDLE thread) : thread_(thread) {}
   HANDLE thread_;
+  unsigned thread_id_;
 };
 
 
@@ -1496,13 +1591,15 @@
                      ThreadEntry,
                      this,
                      0,
-                     NULL));
+                     &data_->thread_id_));
 }
 
 
 // Wait for thread to terminate.
 void Thread::Join() {
-  WaitForSingleObject(data_->thread_, INFINITE);
+  if (data_->thread_id_ != GetCurrentThreadId()) {
+    WaitForSingleObject(data_->thread_, INFINITE);
+  }
 }
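
The recorded thread id makes the self-join check possible: WaitForSingleObject on a thread's own handle would never return, so Join silently does nothing when called from the thread being joined. A minimal illustration of the pattern (Win32 only, simplified):

  #include <windows.h>
  #include <process.h>

  struct ThreadData {
    HANDLE handle;
    unsigned id;  // recorded at creation, as in PlatformData::thread_id_
  };

  static unsigned __stdcall Entry(void*) { return 0; }

  int main() {
    ThreadData data;
    data.handle = reinterpret_cast<HANDLE>(
        _beginthreadex(NULL, 0, Entry, NULL, 0, &data.id));
    if (data.id != GetCurrentThreadId()) {         // true here: main != worker
      WaitForSingleObject(data.handle, INFINITE);  // safe to join
    }
    CloseHandle(data.handle);
    return 0;
  }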
 
 
diff --git a/src/platform.h b/src/platform.h
index 034fe34..726f9ca 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -178,9 +178,14 @@
   // Assign memory as a guard page so that access will cause an exception.
   static void Guard(void* address, const size_t size);
 
+  // Generate a random address to be used for hinting mmap().
+  static void* GetRandomMmapAddr();
+
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
+  static intptr_t CommitPageSize();
+
   // Returns an indication of whether a pointer is in a space that
   // has been allocated by Allocate().  This method may conservatively
   // always return false, but giving more accurate information may
@@ -301,23 +306,46 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
-
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-construction. This removes the reserved memory
+// from the original object.
 class VirtualMemory {
  public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
   // Reserves virtual memory with size.
   explicit VirtualMemory(size_t size);
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned to the given alignment. The aligned area may not start
+  // at the position returned by address().
+  VirtualMemory(size_t size, size_t alignment);
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
   ~VirtualMemory();
 
   // Returns whether the memory has been reserved.
   bool IsReserved();
 
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
   // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
   void* address() {
     ASSERT(IsReserved());
     return address_;
   }
 
-  // Returns the size of the reserved memory.
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
   size_t size() { return size_; }
 
   // Commits real memory. Returns whether the operation succeeded.
@@ -326,11 +354,46 @@
   // Uncommit real memory.  Returns whether the operation succeeded.
   bool Uncommit(void* address, size_t size);
 
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
+  void Release() {
+    ASSERT(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
+  }
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from) {
+    ASSERT(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  static void* ReserveRegion(size_t size);
+
+  static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* base, size_t size);
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* base, size_t size);
+
  private:
   void* address_;  // Start address of the virtual memory.
   size_t size_;  // Size of the virtual memory.
 };
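
A usage sketch of the static region helpers and of TakeControl, with illustrative sizes:

  // Reserve, commit, uncommit and release through the static helpers.
  void* base = VirtualMemory::ReserveRegion(64 * 1024);
  if (base != NULL) {
    if (VirtualMemory::CommitRegion(base, 4096, false)) {  // first page usable
      // ... use [base, base + 4096) ...
      VirtualMemory::UncommitRegion(base, 4096);           // reserved again
    }
    VirtualMemory::ReleaseRegion(base, 64 * 1024);  // original base and size
  }

  // Move ownership between objects without touching the reservation.
  VirtualMemory temp(64 * 1024);
  VirtualMemory owner;
  owner.TakeControl(&temp);  // temp.IsReserved() is now false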
 
+
 // ----------------------------------------------------------------------------
 // Thread
 //
diff --git a/src/preparse-data.h b/src/preparse-data.h
index c6503c4..c77a47a 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -49,7 +49,7 @@
                            int end,
                            int literals,
                            int properties,
-                           int strict_mode) = 0;
+                           LanguageMode language_mode) = 0;
 
   // Logs a symbol creation of a literal or identifier.
   virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
@@ -89,12 +89,12 @@
                            int end,
                            int literals,
                            int properties,
-                           int strict_mode) {
+                           LanguageMode language_mode) {
     function_store_.Add(start);
     function_store_.Add(end);
     function_store_.Add(literals);
     function_store_.Add(properties);
-    function_store_.Add(strict_mode);
+    function_store_.Add(language_mode);
   }
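
Each logged function therefore occupies five consecutive slots of function_store_, in the order of the Add() calls above:

  [0] start position    [2] literal count     [4] language mode
  [1] end position      [3] property count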
 
   // Logs an error message and marks the log as containing an error.
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 899489e..1bca9a3 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -182,13 +182,13 @@
   internal::InputStreamUTF16Buffer buffer(input);
   uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
   internal::UnicodeCache unicode_cache;
-  internal::JavaScriptScanner scanner(&unicode_cache);
+  internal::Scanner scanner(&unicode_cache);
   scanner.Initialize(&buffer);
   internal::CompleteParserRecorder recorder;
   preparser::PreParser::PreParseResult result =
       preparser::PreParser::PreParseProgram(&scanner,
                                             &recorder,
-                                            true,
+                                            internal::kAllowLazy,
                                             stack_limit);
   if (result == preparser::PreParser::kPreParseStackOverflow) {
     return PreParserData::StackOverflow();
diff --git a/src/preparser.cc b/src/preparser.cc
index 6021ebd..b36f4fa 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -52,6 +52,34 @@
 
 namespace preparser {
 
+PreParser::PreParseResult PreParser::PreParseLazyFunction(
+    i::LanguageMode mode, i::ParserRecorder* log) {
+  log_ = log;
+  // Lazy functions always have trivial outer scopes (no with/catch scopes).
+  Scope top_scope(&scope_, kTopLevelScope);
+  set_language_mode(mode);
+  Scope function_scope(&scope_, kFunctionScope);
+  ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
+  bool ok = true;
+  int start_position = scanner_->peek_location().beg_pos;
+  ParseLazyFunctionLiteralBody(&ok);
+  if (stack_overflow_) return kPreParseStackOverflow;
+  if (!ok) {
+    ReportUnexpectedToken(scanner_->current_token());
+  } else {
+    ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+    if (!is_classic_mode()) {
+      int end_pos = scanner_->location().end_pos;
+      CheckOctalLiteral(start_position, end_pos, &ok);
+      if (ok) {
+        CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
+      }
+    }
+  }
+  return kPreParseSuccess;
+}
+
+
 // Preparsing checks a JavaScript program and emits preparse-data that helps
 // a later parsing to be faster.
 // See preparser-data.h for the data.
@@ -72,7 +100,7 @@
   if (token == i::Token::ILLEGAL && stack_overflow_) {
     return;
   }
-  i::JavaScriptScanner::Location source_location = scanner_->location();
+  i::Scanner::Location source_location = scanner_->location();
 
   // Four of the tokens are treated specially
   switch (token) {
@@ -117,8 +145,21 @@
 
 
 PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
+  // (Ecma 262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In harmony mode we additionally allow the following productions:
+  // SourceElement:
+  //    LetDeclaration
+  //    ConstDeclaration
+
   switch (peek()) {
+    case i::Token::FUNCTION:
+      return ParseFunctionDeclaration(ok);
     case i::Token::LET:
+    case i::Token::CONST:
       return ParseVariableStatement(kSourceElement, ok);
     default:
       return ParseStatement(ok);
@@ -136,7 +177,8 @@
     Statement statement = ParseSourceElement(CHECK_OK);
     if (allow_directive_prologue) {
       if (statement.IsUseStrictLiteral()) {
-        set_strict_mode();
+        set_language_mode(harmony_scoping_ ?
+                          i::EXTENDED_MODE : i::STRICT_MODE);
       } else if (!statement.IsStringLiteral()) {
         allow_directive_prologue = false;
       }
@@ -185,6 +227,7 @@
       return ParseBlock(ok);
 
     case i::Token::CONST:
+    case i::Token::LET:
     case i::Token::VAR:
       return ParseVariableStatement(kStatement, ok);
 
@@ -225,8 +268,19 @@
     case i::Token::TRY:
       return ParseTryStatement(ok);
 
-    case i::Token::FUNCTION:
-      return ParseFunctionDeclaration(ok);
+    case i::Token::FUNCTION: {
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseFunctionDeclaration(CHECK_OK);
+      i::Scanner::Location end_location = scanner_->location();
+      if (!is_classic_mode()) {
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+        return Statement::Default();
+      } else {
+        return statement;
+      }
+    }
 
     case i::Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
@@ -271,14 +325,10 @@
   //
   Expect(i::Token::LBRACE, CHECK_OK);
   while (peek() != i::Token::RBRACE) {
-    i::Scanner::Location start_location = scanner_->peek_location();
-    Statement statement = ParseSourceElement(CHECK_OK);
-    i::Scanner::Location end_location = scanner_->location();
-    if (strict_mode() && statement.IsFunctionDeclaration()) {
-      ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                      "strict_function", NULL);
-      *ok = false;
-      return Statement::Default();
+    if (is_extended_mode()) {
+      ParseSourceElement(CHECK_OK);
+    } else {
+      ParseStatement(CHECK_OK);
     }
   }
   Expect(i::Token::RBRACE, ok);
@@ -294,6 +344,7 @@
 
   Statement result = ParseVariableDeclarations(var_context,
                                                NULL,
+                                               NULL,
                                                CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
@@ -307,22 +358,73 @@
 // of 'for-in' loops.
 PreParser::Statement PreParser::ParseVariableDeclarations(
     VariableDeclarationContext var_context,
+    VariableDeclarationProperties* decl_props,
     int* num_decl,
     bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-
+  //
+  // The ES6 Draft Rev3 specifies the following grammar for const declarations
+  //
+  // ConstDeclaration ::
+  //   const ConstBinding (',' ConstBinding)* ';'
+  // ConstBinding ::
+  //   Identifier '=' AssignmentExpression
+  //
+  // TODO(ES6):
+  // ConstBinding ::
+  //   BindingPattern '=' AssignmentExpression
+  bool require_initializer = false;
   if (peek() == i::Token::VAR) {
     Consume(i::Token::VAR);
   } else if (peek() == i::Token::CONST) {
-    if (strict_mode()) {
+    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
+    //
+    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    //
+    // However, disallowing const in classic mode will break compatibility with
+    // existing pages. Therefore we keep allowing const with the old
+    // non-harmony semantics in classic mode.
+    Consume(i::Token::CONST);
+    switch (language_mode()) {
+      case i::CLASSIC_MODE:
+        break;
+      case i::STRICT_MODE: {
+        i::Scanner::Location location = scanner_->peek_location();
+        ReportMessageAt(location, "strict_const", NULL);
+        *ok = false;
+        return Statement::Default();
+      }
+      case i::EXTENDED_MODE:
+        if (var_context != kSourceElement &&
+            var_context != kForStatement) {
+          i::Scanner::Location location = scanner_->peek_location();
+          ReportMessageAt(location.beg_pos, location.end_pos,
+                          "unprotected_const", NULL);
+          *ok = false;
+          return Statement::Default();
+        }
+        require_initializer = true;
+        break;
+    }
+  } else if (peek() == i::Token::LET) {
+    // ES6 Draft Rev4 section 12.2.1:
+    //
+    // LetDeclaration : let LetBindingList ;
+    //
+    // * It is a Syntax Error if the code that matches this production is not
+    //   contained in extended code.
+    if (!is_extended_mode()) {
       i::Scanner::Location location = scanner_->peek_location();
-      ReportMessageAt(location, "strict_const", NULL);
+      ReportMessageAt(location.beg_pos, location.end_pos,
+                      "illegal_let", NULL);
       *ok = false;
       return Statement::Default();
     }
-    Consume(i::Token::CONST);
-  } else if (peek() == i::Token::LET) {
+    Consume(i::Token::LET);
     if (var_context != kSourceElement &&
         var_context != kForStatement) {
       i::Scanner::Location location = scanner_->peek_location();
@@ -331,7 +433,6 @@
       *ok = false;
       return Statement::Default();
     }
-    Consume(i::Token::LET);
   } else {
     *ok = false;
     return Statement::Default();
@@ -346,7 +447,7 @@
     // Parse variable name.
     if (nvars > 0) Consume(i::Token::COMMA);
     Identifier identifier  = ParseIdentifier(CHECK_OK);
-    if (strict_mode() && !identifier.IsValidStrictVariable()) {
+    if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
       StrictModeIdentifierViolation(scanner_->location(),
                                     "strict_var_name",
                                     identifier,
@@ -354,9 +455,10 @@
       return Statement::Default();
     }
     nvars++;
-    if (peek() == i::Token::ASSIGN) {
+    if (peek() == i::Token::ASSIGN || require_initializer) {
       Expect(i::Token::ASSIGN, CHECK_OK);
       ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
+      if (decl_props != NULL) *decl_props = kHasInitializers;
     }
   } while (peek() == i::Token::COMMA);
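
Taken together, the branches above give const three behaviors: classic mode keeps the old non-harmony semantics, strict mode rejects it outright with "strict_const", and extended mode accepts it only as a source element or in a for statement and forces an initializer (require_initializer makes 'const x;' a syntax error while 'const x = 1;' parses). let remains legal only in extended mode.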
 
@@ -372,18 +474,11 @@
 
   Expression expr = ParseExpression(true, CHECK_OK);
   if (expr.IsRawIdentifier()) {
-    if (peek() == i::Token::COLON &&
-        (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
+    ASSERT(!expr.AsIdentifier().IsFutureReserved());
+    ASSERT(is_classic_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
+    if (peek() == i::Token::COLON) {
       Consume(i::Token::COLON);
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseStatement(CHECK_OK);
-      if (strict_mode() && statement.IsFunctionDeclaration()) {
-        i::Scanner::Location end_location = scanner_->location();
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-      }
-      return Statement::Default();
+      return ParseStatement(ok);
     }
     // Preparsing is disabled for extensions (because the extension details
     // aren't passed to lazily compiled functions), so we don't
@@ -476,7 +571,7 @@
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
   Expect(i::Token::WITH, CHECK_OK);
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     i::Scanner::Location location = scanner_->location();
     ReportMessageAt(location, "strict_mode_with", NULL);
     *ok = false;
@@ -513,15 +608,7 @@
       Expect(i::Token::DEFAULT, CHECK_OK);
       Expect(i::Token::COLON, CHECK_OK);
     } else {
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseStatement(CHECK_OK);
-      if (strict_mode() && statement.IsFunctionDeclaration()) {
-        i::Scanner::Location end_location = scanner_->location();
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-        return Statement::Default();
-      }
+      ParseStatement(CHECK_OK);
     }
     token = peek();
   }
@@ -567,9 +654,14 @@
   if (peek() != i::Token::SEMICOLON) {
     if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
         peek() == i::Token::LET) {
+      bool is_let = peek() == i::Token::LET;
       int decl_count;
-      ParseVariableDeclarations(kForStatement, &decl_count, CHECK_OK);
-      if (peek() == i::Token::IN && decl_count == 1) {
+      VariableDeclarationProperties decl_props = kHasNoInitializers;
+      ParseVariableDeclarations(
+          kForStatement, &decl_props, &decl_count, CHECK_OK);
+      bool accept_IN = decl_count == 1 &&
+          !(is_let && decl_props == kHasInitializers);
+      if (peek() == i::Token::IN && accept_IN) {
         Expect(i::Token::IN, CHECK_OK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RPAREN, CHECK_OK);
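
The new decl_props bookkeeping exists for exactly this check: a single let declaration without an initializer, as in 'for (let x in o)', may still take the for-in path, while 'for (let x = 0 in o)' is one declaration but carries an initializer and is no longer accepted as a for-in loop.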
@@ -614,7 +706,7 @@
 
   Expect(i::Token::THROW, CHECK_OK);
   if (scanner_->HasAnyLineTerminatorBeforeNext()) {
-    i::JavaScriptScanner::Location pos = scanner_->location();
+    i::Scanner::Location pos = scanner_->location();
     ReportMessageAt(pos, "newline_after_throw", NULL);
     *ok = false;
     return Statement::Default();
@@ -649,7 +741,7 @@
     Consume(i::Token::CATCH);
     Expect(i::Token::LPAREN, CHECK_OK);
     Identifier id = ParseIdentifier(CHECK_OK);
-    if (strict_mode() && !id.IsValidStrictVariable()) {
+    if (!is_classic_mode() && !id.IsValidStrictVariable()) {
       StrictModeIdentifierViolation(scanner_->location(),
                                     "strict_catch_variable",
                                     id,
@@ -727,7 +819,8 @@
     return expression;
   }
 
-  if (strict_mode() && expression.IsIdentifier() &&
+  if (!is_classic_mode() &&
+      expression.IsIdentifier() &&
       expression.AsIdentifier().IsEvalOrArguments()) {
     i::Scanner::Location after = scanner_->location();
     ReportMessageAt(before.beg_pos, after.end_pos,
@@ -815,7 +908,8 @@
     op = Next();
     i::Scanner::Location before = scanner_->peek_location();
     Expression expression = ParseUnaryExpression(CHECK_OK);
-    if (strict_mode() && expression.IsIdentifier() &&
+    if (!is_classic_mode() &&
+        expression.IsIdentifier() &&
         expression.AsIdentifier().IsEvalOrArguments()) {
       i::Scanner::Location after = scanner_->location();
       ReportMessageAt(before.beg_pos, after.end_pos,
@@ -837,7 +931,8 @@
   Expression expression = ParseLeftHandSideExpression(CHECK_OK);
   if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       i::Token::IsCountOp(peek())) {
-    if (strict_mode() && expression.IsIdentifier() &&
+    if (!is_classic_mode() &&
+        expression.IsIdentifier() &&
         expression.AsIdentifier().IsEvalOrArguments()) {
       i::Scanner::Location after = scanner_->location();
       ReportMessageAt(before.beg_pos, after.end_pos,
@@ -1024,7 +1119,7 @@
     }
 
     case i::Token::FUTURE_STRICT_RESERVED_WORD:
-      if (strict_mode()) {
+      if (!is_classic_mode()) {
         Next();
         i::Scanner::Location location = scanner_->location();
         ReportMessageAt(location, "strict_reserved_word", NULL);
@@ -1124,7 +1219,7 @@
   if (HasConflict(old_type, type)) {
     if (IsDataDataConflict(old_type, type)) {
       // Both are data properties.
-      if (!strict_mode()) return;
+      if (is_classic_mode()) return;
       ReportMessageAt(scanner_->location(),
                       "strict_duplicate_property", NULL);
     } else if (IsDataAccessorConflict(old_type, type)) {
@@ -1307,9 +1402,6 @@
   }
   Expect(i::Token::RPAREN, CHECK_OK);
 
-  Expect(i::Token::LBRACE, CHECK_OK);
-  int function_block_pos = scanner_->location().beg_pos;
-
   // Determine if the function will be lazily compiled.
   // Currently only happens to top-level functions.
   // Optimistically assume that all top-level functions are lazily compiled.
@@ -1318,26 +1410,15 @@
                              !parenthesized_function_);
   parenthesized_function_ = false;
 
+  Expect(i::Token::LBRACE, CHECK_OK);
   if (is_lazily_compiled) {
-    log_->PauseRecording();
-    ParseSourceElements(i::Token::RBRACE, ok);
-    log_->ResumeRecording();
-    if (!*ok) Expression::Default();
-
-    Expect(i::Token::RBRACE, CHECK_OK);
-
-    // Position right after terminal '}'.
-    int end_pos = scanner_->location().end_pos;
-    log_->LogFunction(function_block_pos, end_pos,
-                      function_scope.materialized_literal_count(),
-                      function_scope.expected_properties(),
-                      strict_mode() ? 1 : 0);
+    ParseLazyFunctionLiteralBody(CHECK_OK);
   } else {
-    ParseSourceElements(i::Token::RBRACE, CHECK_OK);
-    Expect(i::Token::RBRACE, CHECK_OK);
+    ParseSourceElements(i::Token::RBRACE, ok);
   }
+  Expect(i::Token::RBRACE, CHECK_OK);
 
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     int end_position = scanner_->location().end_pos;
     CheckOctalLiteral(start_position, end_position, CHECK_OK);
     CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
@@ -1348,11 +1429,31 @@
 }
 
 
+void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
+  int body_start = scanner_->location().beg_pos;
+  log_->PauseRecording();
+  ParseSourceElements(i::Token::RBRACE, ok);
+  log_->ResumeRecording();
+  if (!*ok) return;
+
+  // Position right after terminal '}'.
+  ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+  int body_end = scanner_->peek_location().end_pos;
+  log_->LogFunction(body_start, body_end,
+                    scope_->materialized_literal_count(),
+                    scope_->expected_properties(),
+                    language_mode());
+}
+
+
 PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
-
   Expect(i::Token::MOD, CHECK_OK);
+  if (!allow_natives_syntax_) {
+    *ok = false;
+    return Expression::Default();
+  }
   ParseIdentifier(CHECK_OK);
   ParseArguments(ok);
 
@@ -1435,9 +1536,16 @@
       ReportMessageAt(location.beg_pos, location.end_pos,
                       "reserved_word", NULL);
       *ok = false;
+      return GetIdentifierSymbol();
     }
-      // FALLTHROUGH
     case i::Token::FUTURE_STRICT_RESERVED_WORD:
+      if (!is_classic_mode()) {
+        i::Scanner::Location location = scanner_->location();
+        ReportMessageAt(location.beg_pos, location.end_pos,
+                        "strict_reserved_word", NULL);
+        *ok = false;
+      }
+      // FALLTHROUGH
     case i::Token::IDENTIFIER:
       return GetIdentifierSymbol();
     default:
@@ -1450,7 +1558,7 @@
 void PreParser::SetStrictModeViolation(i::Scanner::Location location,
                                        const char* type,
                                        bool* ok) {
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     ReportMessageAt(location, type, NULL);
     *ok = false;
     return;
@@ -1490,7 +1598,7 @@
   } else if (identifier.IsFutureStrictReserved()) {
     type = "strict_reserved_word";
   }
-  if (strict_mode()) {
+  if (!is_classic_mode()) {
     ReportMessageAt(location, type, NULL);
     *ok = false;
     return;
diff --git a/src/preparser.h b/src/preparser.h
index b97b7cf..fc8a4a0 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -110,19 +110,51 @@
     kPreParseSuccess
   };
 
+
+  PreParser(i::Scanner* scanner,
+            i::ParserRecorder* log,
+            uintptr_t stack_limit,
+            bool allow_lazy,
+            bool allow_natives_syntax)
+      : scanner_(scanner),
+        log_(log),
+        scope_(NULL),
+        stack_limit_(stack_limit),
+        strict_mode_violation_location_(i::Scanner::Location::invalid()),
+        strict_mode_violation_type_(NULL),
+        stack_overflow_(false),
+        allow_lazy_(allow_lazy),
+        allow_natives_syntax_(allow_natives_syntax),
+        parenthesized_function_(false),
+        harmony_scoping_(scanner->HarmonyScoping()) { }
+
   ~PreParser() {}
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
+  static PreParseResult PreParseProgram(i::Scanner* scanner,
                                         i::ParserRecorder* log,
-                                        bool allow_lazy,
+                                        int flags,
                                         uintptr_t stack_limit) {
-    return PreParser(scanner, log, stack_limit, allow_lazy).PreParse();
+    bool allow_lazy = (flags & i::kAllowLazy) != 0;
+    bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
+    return PreParser(scanner, log, stack_limit,
+                     allow_lazy, allow_natives_syntax).PreParse();
   }
 
+  // Parses a single function literal, from the opening parenthesis before
+  // the parameters to the closing brace after the body.
+  // Returns a FunctionEntry describing the body of the function in enough
+  // detail that it can be lazily compiled.
+  // The scanner is expected to have matched the "function" keyword and
+  // the parameters, and to have consumed the initial '{'.
+  // On return, unless an error occurred, the scanner is positioned before
+  // the final '}'.
+  PreParseResult PreParseLazyFunction(i::LanguageMode mode,
+                                      i::ParserRecorder* log);
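
A hypothetical call sequence (setup abbreviated; 'stream' and 'stack_limit' are assumed to exist, and the scanner must already have consumed the function's '{'):

  i::UnicodeCache unicode_cache;
  i::Scanner scanner(&unicode_cache);
  scanner.Initialize(&stream);  // UTF-16 stream, as in preparser-api.cc
  i::CompleteParserRecorder recorder;
  preparser::PreParser preparser(&scanner, &recorder, stack_limit,
                                 true,    // allow_lazy
                                 false);  // allow_natives_syntax
  preparser::PreParser::PreParseResult result =
      preparser.PreParseLazyFunction(i::CLASSIC_MODE, &recorder);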
+
  private:
   // Used to detect duplicates in object literals. Each of the values
   // kGetterProperty, kSetterProperty and kValueProperty represents
@@ -179,6 +211,12 @@
     kForStatement
   };
 
+  // Whether a list of variable declarations includes any initializers.
+  enum VariableDeclarationProperties {
+    kHasInitializers,
+    kHasNoInitializers
+  };
+
   class Expression;
 
   class Identifier {
@@ -408,7 +446,8 @@
           materialized_literal_count_(0),
           expected_properties_(0),
           with_nesting_count_(0),
-          strict_((prev_ != NULL) && prev_->is_strict()) {
+          language_mode_(
+              (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE) {
       *variable = this;
     }
     ~Scope() { *variable_ = prev_; }
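
A freshly created Scope now inherits the full language mode of its enclosing scope rather than a boolean strict flag, so nested functions start in the surrounding mode and a directive prologue can only tighten it (ParseSourceElements switches to STRICT_MODE, or to EXTENDED_MODE when harmony scoping is on).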
@@ -418,8 +457,15 @@
     int expected_properties() { return expected_properties_; }
     int materialized_literal_count() { return materialized_literal_count_; }
     bool IsInsideWith() { return with_nesting_count_ != 0; }
-    bool is_strict() { return strict_; }
-    void set_strict() { strict_ = true; }
+    bool is_classic_mode() {
+      return language_mode_ == i::CLASSIC_MODE;
+    }
+    i::LanguageMode language_mode() {
+      return language_mode_;
+    }
+    void set_language_mode(i::LanguageMode language_mode) {
+      language_mode_ = language_mode;
+    }
     void EnterWith() { with_nesting_count_++; }
     void LeaveWith() { with_nesting_count_--; }
 
@@ -430,25 +476,9 @@
     int materialized_literal_count_;
     int expected_properties_;
     int with_nesting_count_;
-    bool strict_;
+    i::LanguageMode language_mode_;
   };
 
-  // Private constructor only used in PreParseProgram.
-  PreParser(i::JavaScriptScanner* scanner,
-            i::ParserRecorder* log,
-            uintptr_t stack_limit,
-            bool allow_lazy)
-      : scanner_(scanner),
-        log_(log),
-        scope_(NULL),
-        stack_limit_(stack_limit),
-        strict_mode_violation_location_(i::Scanner::Location::invalid()),
-        strict_mode_violation_type_(NULL),
-        stack_overflow_(false),
-        allow_lazy_(true),
-        parenthesized_function_(false),
-        harmony_block_scoping_(scanner->HarmonyBlockScoping()) { }
-
   // Preparse the program. Only called in PreParseProgram after creating
   // the instance.
   PreParseResult PreParse() {
@@ -459,7 +489,7 @@
     if (stack_overflow_) return kPreParseStackOverflow;
     if (!ok) {
       ReportUnexpectedToken(scanner_->current_token());
-    } else if (scope_->is_strict()) {
+    } else if (!scope_->is_classic_mode()) {
       CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
     }
     return kPreParseSuccess;
@@ -493,6 +523,7 @@
   Statement ParseVariableStatement(VariableDeclarationContext var_context,
                                    bool* ok);
   Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                      VariableDeclarationProperties* decl_props,
                                       int* num_decl,
                                       bool* ok);
   Statement ParseExpressionOrLabelledStatement(bool* ok);
@@ -527,6 +558,7 @@
 
   Arguments ParseArguments(bool* ok);
   Expression ParseFunctionLiteral(bool* ok);
+  void ParseLazyFunctionLiteralBody(bool* ok);
 
   Identifier ParseIdentifier(bool* ok);
   Identifier ParseIdentifierName(bool* ok);
@@ -562,11 +594,19 @@
 
   bool peek_any_identifier();
 
-  void set_strict_mode() {
-    scope_->set_strict();
+  void set_language_mode(i::LanguageMode language_mode) {
+    scope_->set_language_mode(language_mode);
   }
 
-  bool strict_mode() { return scope_->is_strict(); }
+  bool is_classic_mode() {
+    return scope_->language_mode() == i::CLASSIC_MODE;
+  }
+
+  bool is_extended_mode() {
+    return scope_->language_mode() == i::EXTENDED_MODE;
+  }
+
+  i::LanguageMode language_mode() { return scope_->language_mode(); }
 
   void Consume(i::Token::Value token) { Next(); }
 
@@ -599,7 +639,7 @@
                                      Identifier identifier,
                                      bool* ok);
 
-  i::JavaScriptScanner* scanner_;
+  i::Scanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
   uintptr_t stack_limit_;
@@ -607,8 +647,9 @@
   const char* strict_mode_violation_type_;
   bool stack_overflow_;
   bool allow_lazy_;
+  bool allow_natives_syntax_;
   bool parenthesized_function_;
-  bool harmony_block_scoping_;
+  bool harmony_scoping_;
 };
 } }  // v8::preparser
 
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 663af28..37c76ce 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -372,13 +372,6 @@
 }
 
 
-void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
-  Print("(");
-  Visit(node->expression());
-  Print("%s null)", Token::String(node->op()));
-}
-
-
 void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
   Print("<this-function>");
 }
@@ -1020,15 +1013,6 @@
 }
 
 
-void AstPrinter::VisitCompareToNull(CompareToNull* node) {
-  const char* name = node->is_strict()
-      ? "COMPARE-TO-NULL-STRICT"
-      : "COMPARE-TO-NULL";
-  IndentedScope indent(this, name, node);
-  Visit(node->expression());
-}
-
-
 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION");
 }
@@ -1404,16 +1388,6 @@
 }
 
 
-void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
-  TagScope tag(this, "CompareToNull");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("is_strict", expr->is_strict());
-  }
-  Visit(expr->expression());
-}
-
-
 void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
   TagScope tag(this, "ThisFunction");
 }
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index e319efb..5626aca 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -110,8 +110,7 @@
   Vector<char> dst = Vector<char>::New(len + 1);
   OS::StrNCpy(dst, src, len);
   dst[len] = '\0';
-  uint32_t hash =
-      HashSequentialString(dst.start(), len, HEAP->HashSeed());
+  uint32_t hash = HashSequentialString(dst.start(), len);
   return AddOrDisposeString(dst.start(), hash);
 }
 
@@ -144,17 +143,18 @@
     DeleteArray(str.start());
     return format;
   }
-  uint32_t hash = HashSequentialString(
-      str.start(), len, HEAP->HashSeed());
+  uint32_t hash = HashSequentialString(str.start(), len);
   return AddOrDisposeString(str.start(), hash);
 }
 
 
 const char* StringsStorage::GetName(String* name) {
   if (name->IsString()) {
-    return AddOrDisposeString(
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
-        name->Hash());
+    int length = Min(kMaxNameSize, name->length());
+    SmartArrayPointer<char> data =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
+    uint32_t hash = HashSequentialString(*data, length);
+    return AddOrDisposeString(data.Detach(), hash);
   }
   return "";
 }
@@ -178,21 +178,18 @@
 
 
 uint32_t CodeEntry::GetCallUid() const {
-  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
+  uint32_t hash = ComputeIntegerHash(tag_);
   if (shared_id_ != 0) {
-    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
-                               v8::internal::kZeroHashSeed);
+    hash ^= ComputeIntegerHash(
+        static_cast<uint32_t>(shared_id_));
   } else {
     hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
-        v8::internal::kZeroHashSeed);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
     hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
-        v8::internal::kZeroHashSeed);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
     hash ^= ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
-        v8::internal::kZeroHashSeed);
-    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
+    hash ^= ComputeIntegerHash(line_number_);
   }
   return hash;
 }
@@ -493,8 +490,6 @@
 
 CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
 const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
-    CodeMap::CodeEntryInfo(NULL, 0);
 
 
 void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -1216,7 +1211,7 @@
       entries_sorted_(false) {
   STATIC_ASSERT(
       sizeof(HeapGraphEdge) ==
-      SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
+      SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize);  // NOLINT
   STATIC_ASSERT(
       sizeof(HeapEntry) ==
       SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize);  // NOLINT
@@ -1408,10 +1403,12 @@
   if (entry != NULL) {
     void* value = entry->value;
     entries_map_.Remove(from, AddressHash(from));
-    entry = entries_map_.Lookup(to, AddressHash(to), true);
-    // We can have an entry at the new location, it is OK, as GC can overwrite
-    // dead objects with alive objects being moved.
-    entry->value = value;
+    if (to != NULL) {
+      entry = entries_map_.Lookup(to, AddressHash(to), true);
+      // It is OK to find an entry at the new location: the GC can
+      // overwrite dead objects with live objects being moved.
+      entry->value = value;
+    }
   }
 }
 
@@ -1467,13 +1464,10 @@
 uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
   uint64_t id = static_cast<uint64_t>(info->GetHash());
   const char* label = info->GetLabel();
-  id ^= HashSequentialString(label,
-                             static_cast<int>(strlen(label)),
-                             HEAP->HashSeed());
+  id ^= HashSequentialString(label, static_cast<int>(strlen(label)));
   intptr_t element_count = info->GetElementCount();
   if (element_count != -1)
-    id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
-                             v8::internal::kZeroHashSeed);
+    id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count));
   return id << 1;
 }
 
@@ -1536,6 +1530,8 @@
 
 
 Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
+  // First perform a full GC in order to avoid dead objects.
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   AssertNoAllocation no_allocation;
   HeapObject* object = NULL;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
@@ -1843,12 +1839,13 @@
 }
 
 
-int V8HeapExplorer::EstimateObjectsCount() {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
   int objects_count = 0;
-  for (HeapObject* obj = iterator.next();
+  for (HeapObject* obj = iterator->next();
        obj != NULL;
-       obj = iterator.next(), ++objects_count) {}
+       obj = iterator->next()) {
+    objects_count++;
+  }
   return objects_count;
 }
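
Taking the iterator as a parameter moves its construction to the caller; SetProgressTotal below now creates the single HeapIterator, so the estimate no longer instantiates a private filtering iterator of its own.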
 
@@ -1921,6 +1918,7 @@
           SetPropertyReference(
               obj, entry,
               heap_->prototype_symbol(), proto_or_map,
+              NULL,
               JSFunction::kPrototypeOrInitialMapOffset);
         } else {
           SetPropertyReference(
@@ -1935,9 +1933,11 @@
       SetInternalReference(js_fun, entry,
                            "context", js_fun->unchecked_context(),
                            JSFunction::kContextOffset);
-      TagObject(js_fun->literals(), "(function literals)");
+      TagObject(js_fun->literals_or_bindings(),
+                "(function literals_or_bindings)");
       SetInternalReference(js_fun, entry,
-                           "literals", js_fun->literals(),
+                           "literals_or_bindings",
+                           js_fun->literals_or_bindings(),
                            JSFunction::kLiteralsOffset);
     }
     TagObject(js_obj->properties(), "(object properties)");
@@ -1954,6 +1954,10 @@
       SetInternalReference(obj, entry, 1, cs->first());
       SetInternalReference(obj, entry, 2, cs->second());
     }
+    if (obj->IsSlicedString()) {
+      SlicedString* ss = SlicedString::cast(obj);
+      SetInternalReference(obj, entry, "parent", ss->parent());
+    }
     extract_indexed_refs = false;
   } else if (obj->IsGlobalContext()) {
     Context* context = Context::cast(obj);
@@ -1976,6 +1980,14 @@
                            "descriptors", map->instance_descriptors(),
                            Map::kInstanceDescriptorsOrBitField3Offset);
     }
+    if (map->prototype_transitions() != heap_->empty_fixed_array()) {
+      TagObject(map->prototype_transitions(), "(prototype transitions)");
+      SetInternalReference(obj,
+                           entry,
+                           "prototype_transitions",
+                           map->prototype_transitions(),
+                           Map::kPrototypeTransitionsOffset);
+    }
     SetInternalReference(obj, entry,
                          "code_cache", map->code_cache(),
                          Map::kCodeCacheOffset);
@@ -2052,20 +2064,27 @@
 void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                                               HeapEntry* entry) {
   if (js_obj->IsJSFunction()) {
-    HandleScope hs;
     JSFunction* func = JSFunction::cast(js_obj);
     Context* context = func->context();
-    ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
-    SerializedScopeInfo* serialized_scope_info =
-        context->closure()->shared()->scope_info();
-    ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
-    int locals_number = zone_scope_info.NumberOfLocals();
-    for (int i = 0; i < locals_number; ++i) {
-      String* local_name = *zone_scope_info.LocalName(i);
-      int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
-      if (idx >= 0 && idx < context->length()) {
-        SetClosureReference(js_obj, entry, local_name, context->get(idx));
-      }
+    ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+
+    // Add context allocated locals.
+    int context_locals = scope_info->ContextLocalCount();
+    for (int i = 0; i < context_locals; ++i) {
+      String* local_name = scope_info->ContextLocalName(i);
+      int idx = Context::MIN_CONTEXT_SLOTS + i;
+      SetClosureReference(js_obj, entry, local_name, context->get(idx));
+    }
+
+    // Add function variable.
+    if (scope_info->HasFunctionName()) {
+      String* name = scope_info->FunctionName();
+      int idx = Context::MIN_CONTEXT_SLOTS + context_locals;
+#ifdef DEBUG
+      VariableMode mode;
+      ASSERT(idx == scope_info->FunctionContextSlotIndex(name, &mode));
+#endif
+      SetClosureReference(js_obj, entry, name, context->get(idx));
     }
   }
 }
@@ -2083,6 +2102,7 @@
             SetPropertyReference(
                 js_obj, entry,
                 descs->GetKey(i), js_obj->InObjectPropertyAt(index),
+                NULL,
                 js_obj->GetInObjectPropertyOffset(index));
           } else {
             SetPropertyReference(
@@ -2096,7 +2116,29 @@
               js_obj, entry,
               descs->GetKey(i), descs->GetConstantFunction(i));
           break;
-        default: ;
+        case CALLBACKS: {
+          Object* callback_obj = descs->GetValue(i);
+          if (callback_obj->IsFixedArray()) {
+            FixedArray* accessors = FixedArray::cast(callback_obj);
+            if (Object* getter = accessors->get(JSObject::kGetterIndex)) {
+              SetPropertyReference(js_obj, entry, descs->GetKey(i),
+                                   getter, "get-%s");
+            }
+            if (Object* setter = accessors->get(JSObject::kSetterIndex)) {
+              SetPropertyReference(js_obj, entry, descs->GetKey(i),
+                                   setter, "set-%s");
+            }
+          }
+          break;
+        }
+        case NORMAL:  // only in slow mode
+        case HANDLER:  // only in lookup results, not in descriptors
+        case INTERCEPTOR:  // only in lookup results, not in descriptors
+        case MAP_TRANSITION:  // we do not care about transitions here...
+        case ELEMENTS_TRANSITION:
+        case CONSTANT_TRANSITION:
+        case NULL_DESCRIPTOR:  // ... and not about "holes"
+          break;
       }
     }
   } else {
@@ -2135,7 +2177,7 @@
       }
     }
   } else if (js_obj->HasDictionaryElements()) {
-    SeededNumberDictionary* dictionary = js_obj->element_dictionary();
+    NumberDictionary* dictionary = js_obj->element_dictionary();
     int length = dictionary->Capacity();
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
@@ -2161,15 +2203,16 @@
 
 
 String* V8HeapExplorer::GetConstructorName(JSObject* object) {
-  if (object->IsJSFunction()) return HEAP->closure_symbol();
+  Heap* heap = object->GetHeap();
+  if (object->IsJSFunction()) return heap->closure_symbol();
   String* constructor_name = object->constructor_name();
-  if (constructor_name == HEAP->Object_symbol()) {
+  if (constructor_name == heap->Object_symbol()) {
     // Look up an immediate "constructor" property, if it is a function,
     // return its name. This is for instances of binding objects, which
     // have prototype constructor type "Object".
     Object* constructor_prop = NULL;
-    LookupResult result;
-    object->LocalLookupRealNamedProperty(HEAP->constructor_symbol(), &result);
+    LookupResult result(heap->isolate());
+    object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
     if (result.IsProperty()) {
       constructor_prop = result.GetLazyValue();
     }
@@ -2206,9 +2249,11 @@
 
 bool V8HeapExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
-  filler_ = filler;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
+
+  filler_ = filler;
   bool interrupted = false;
+
   // Heap iteration with filtering must be finished in any case.
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -2318,15 +2363,23 @@
                                           HeapEntry* parent_entry,
                                           String* reference_name,
                                           Object* child_obj,
+                                          const char* name_format_string,
                                           int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     HeapGraphEdge::Type type = reference_name->length() > 0 ?
         HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
+    const char* name = name_format_string != NULL ?
+        collection_->names()->GetFormatted(
+            name_format_string,
+            *reference_name->ToCString(DISALLOW_NULLS,
+                                       ROBUST_STRING_TRAVERSAL)) :
+        collection_->names()->GetName(reference_name);
+
     filler_->SetNamedReference(type,
                                parent_obj,
                                parent_entry,
-                               collection_->names()->GetName(reference_name),
+                               name,
                                child_obj,
                                child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
@@ -2417,6 +2470,7 @@
 
 // Modifies heap. Must not be run during heap traversal.
 void V8HeapExplorer::TagGlobalObjects() {
+  HandleScope scope;
   Isolate* isolate = Isolate::Current();
   GlobalObjectsEnumerator enumerator;
   isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -2427,6 +2481,7 @@
   const char** urls = NewArray<const char*>(enumerator.count());
   for (int i = 0, l = enumerator.count(); i < l; ++i) {
     urls[i] = NULL;
+    HandleScope scope;
     Handle<JSGlobalObject> global_obj = enumerator.at(i);
     Object* obj_document;
     if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
@@ -2774,13 +2829,43 @@
 bool HeapSnapshotGenerator::GenerateSnapshot() {
   v8_heap_explorer_.TagGlobalObjects();
 
+  // TODO(1562): The profiler assumes that any object that is in the heap
+  // after a full GC is reachable from the root when computing dominators.
+  // This is not true for weakly reachable objects.
+  // As a temporary solution, we call GC twice.
+  Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+
+#ifdef DEBUG
+  Heap* debug_heap = Isolate::Current()->heap();
+  ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->code_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->map_space()->was_swept_conservatively());
+#endif
+
+  // The following code uses heap iterators, so we want the heap to be
+  // stable. It should follow TagGlobalObjects as that can allocate.
   AssertNoAllocation no_alloc;
 
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
   SetProgressTotal(4);  // 2 passes + dominators + sizes.
 
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
   // Pass 1. Iterate heap contents to count entries and references.
   if (!CountEntriesAndReferences()) return false;
 
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
   // Allocate and fill entries in the snapshot, allocate references.
   snapshot_->AllocateEntries(entries_.entries_count(),
                              entries_.total_children_count(),
@@ -2818,8 +2903,9 @@
 
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   progress_total_ = (
-      v8_heap_explorer_.EstimateObjectsCount() +
+      v8_heap_explorer_.EstimateObjectsCount(&iterator) +
       dom_explorer_.EstimateObjectsCount()) * iterations_count;
   progress_counter_ = 0;
 }
@@ -2869,7 +2955,7 @@
       nodes_to_visit.RemoveLast();
     }
   }
-  entries->Truncate(current_entry);
+  ASSERT_EQ(current_entry, entries->length());
 }
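
Since the entry list is pre-sized and is now expected to be filled exactly, the old Truncate call becomes an assertion that every pre-allocated entry was visited.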
 
 
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 0beb109..44be3db 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -74,6 +74,8 @@
   inline const char* GetFunctionName(const char* name);
 
  private:
+  static const int kMaxNameSize = 1024;
+
   INLINE(static bool StringsMatch(void* key1, void* key2)) {
     return strcmp(reinterpret_cast<char*>(key1),
                   reinterpret_cast<char*>(key2)) == 0;
@@ -257,7 +259,7 @@
     typedef Address Key;
     typedef CodeEntryInfo Value;
     static const Key kNoKey;
-    static const Value kNoValue;
+    static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
     static int Compare(const Key& a, const Key& b) {
       return a < b ? -1 : (a > b ? 1 : 0);
     }
@@ -550,7 +552,10 @@
   Vector<HeapGraphEdge*> retainers() {
     return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
   HeapEntry* dominator() { return dominator_; }
-  void set_dominator(HeapEntry* entry) { dominator_ = entry; }
+  void set_dominator(HeapEntry* entry) {
+    ASSERT(entry != NULL);
+    dominator_ = entry;
+  }
 
   void clear_paint() { painted_ = kUnpainted; }
   bool painted_reachable() { return painted_ == kPainted; }
@@ -735,8 +740,7 @@
 
   static uint32_t AddressHash(Address addr) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
-        v8::internal::kZeroHashSeed);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
   }
 
   bool initial_fill_mode_;
@@ -837,8 +841,7 @@
 
   static uint32_t Hash(HeapThing thing) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
-        v8::internal::kZeroHashSeed);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
   }
   static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
     return key1 == key2;
@@ -922,7 +925,7 @@
   virtual HeapEntry* AllocateEntry(
       HeapThing ptr, int children_count, int retainers_count);
   void AddRootEntries(SnapshotFillerInterface* filler);
-  int EstimateObjectsCount();
+  int EstimateObjectsCount(HeapIterator* iterator);
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
   void TagGlobalObjects();
 
@@ -970,6 +973,7 @@
                             HeapEntry* parent,
                             String* reference_name,
                             Object* child,
+                            const char* name_format_string = NULL,
                             int field_offset = -1);
   void SetPropertyShortcutReference(HeapObject* parent_obj,
                                     HeapEntry* parent,
@@ -1020,8 +1024,7 @@
   void VisitSubtreeWrapper(Object** p, uint16_t class_id);
 
   static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
-    return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
-                              v8::internal::kZeroHashSeed);
+    return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()));
   }
   static bool RetainedInfosMatch(void* key1, void* key2) {
     return key1 == key2 ||
@@ -1099,8 +1102,7 @@
 
   INLINE(static uint32_t ObjectHash(const void* key)) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
-        v8::internal::kZeroHashSeed);
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
   }
 
   void EnumerateNodes();
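(The hunks above drop the explicit seed argument at each call site, reverting to the single-argument ComputeIntegerHash. For reference, a self-contained sketch of the Jenkins-style 32-bit integer hash that src/utils.h provides under that name; the exact constants are an assumption based on that header, not part of this diff:

    #include <stdint.h>
    #include <stdio.h>

    // Jenkins-style 32-bit integer hash; assumed to match the unseeded
    // ComputeIntegerHash in src/utils.h at this revision.
    static inline uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }

    int main() {
      // Hash a pointer key the same way AddressHash/ObjectHash do above.
      int dummy = 0;
      uint32_t hash = ComputeIntegerHash(
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&dummy)));
      printf("%u\n", hash);
      return 0;
    }
)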
diff --git a/src/property-details.h b/src/property-details.h
new file mode 100644
index 0000000..135c2ca
--- /dev/null
+++ b/src/property-details.h
@@ -0,0 +1,182 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_DETAILS_H_
+#define V8_PROPERTY_DETAILS_H_
+
+#include "../include/v8.h"
+#include "allocation.h"
+#include "utils.h"
+
+// Ecma-262 3rd 8.6.1
+enum PropertyAttributes {
+  NONE              = v8::None,
+  READ_ONLY         = v8::ReadOnly,
+  DONT_ENUM         = v8::DontEnum,
+  DONT_DELETE       = v8::DontDelete,
+  ABSENT            = 16  // Used in runtime to indicate a property is absent.
+  // ABSENT can never be stored in or returned from a descriptor's attributes
+  // bitfield.  It is only used as a return value meaning the attributes of
+  // a non-existent property.
+};
+
+
+namespace v8 {
+namespace internal {
+
+class Smi;
+
+// Type of properties.
+// Order of properties is significant.
+// Must fit in the BitField PropertyDetails::TypeField.
+// A copy of this is in mirror-debugger.js.
+enum PropertyType {
+  NORMAL                    = 0,  // only in slow mode
+  FIELD                     = 1,  // only in fast mode
+  CONSTANT_FUNCTION         = 2,  // only in fast mode
+  CALLBACKS                 = 3,
+  HANDLER                   = 4,  // only in lookup results, not in descriptors
+  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
+  // All properties before MAP_TRANSITION are real.
+  MAP_TRANSITION            = 6,  // only in fast mode
+  ELEMENTS_TRANSITION       = 7,
+  CONSTANT_TRANSITION       = 8,  // only in fast mode
+  NULL_DESCRIPTOR           = 9,  // only in fast mode
+  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
+  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
+  // nonexistent properties.
+  NONEXISTENT = NULL_DESCRIPTOR
+};
+
+
+inline bool IsTransitionType(PropertyType type) {
+  switch (type) {
+    case MAP_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case ELEMENTS_TRANSITION:
+      return true;
+    case NORMAL:
+    case FIELD:
+    case CONSTANT_FUNCTION:
+    case CALLBACKS:
+    case HANDLER:
+    case INTERCEPTOR:
+    case NULL_DESCRIPTOR:
+      return false;
+  }
+  UNREACHABLE();  // keep the compiler happy
+  return false;
+}
+
+
+inline bool IsRealProperty(PropertyType type) {
+  switch (type) {
+    case NORMAL:
+    case FIELD:
+    case CONSTANT_FUNCTION:
+    case CALLBACKS:
+    case HANDLER:
+    case INTERCEPTOR:
+      return true;
+    case MAP_TRANSITION:
+    case ELEMENTS_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case NULL_DESCRIPTOR:
+      return false;
+  }
+  UNREACHABLE();  // keep the compiler happy
+  return false;
+}
+
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails BASE_EMBEDDED {
+ public:
+  PropertyDetails(PropertyAttributes attributes,
+                  PropertyType type,
+                  int index = 0) {
+    ASSERT(TypeField::is_valid(type));
+    ASSERT(AttributesField::is_valid(attributes));
+    ASSERT(StorageField::is_valid(index));
+
+    value_ = TypeField::encode(type)
+        | AttributesField::encode(attributes)
+        | StorageField::encode(index);
+
+    ASSERT(type == this->type());
+    ASSERT(attributes == this->attributes());
+    ASSERT(index == this->index());
+  }
+
+  // Conversion for storing details as Object*.
+  explicit inline PropertyDetails(Smi* smi);
+  inline Smi* AsSmi();
+
+  PropertyType type() { return TypeField::decode(value_); }
+
+  bool IsTransition() {
+    PropertyType t = type();
+    ASSERT(t != INTERCEPTOR);
+    return IsTransitionType(t);
+  }
+
+  bool IsProperty() {
+    return IsRealProperty(type());
+  }
+
+  PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+  int index() { return StorageField::decode(value_); }
+
+  inline PropertyDetails AsDeleted();
+
+  static bool IsValidIndex(int index) {
+    return StorageField::is_valid(index);
+  }
+
+  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
+
+  // Bit fields in value_ (type, shift, size). Must be public so the
+  // constants can be embedded in generated code.
+  class TypeField:       public BitField<PropertyType,       0, 4> {};
+  class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
+  class DeletedField:    public BitField<uint32_t,           7, 1> {};
+  class StorageField:    public BitField<uint32_t,           8, 32-8> {};
+
+  static const int kInitialIndex = 1;
+
+ private:
+  uint32_t value_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_PROPERTY_DETAILS_H_
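(To make the bit layout above concrete, here is a minimal sketch of how PropertyDetails packs type, attributes, and storage index into value_. The BitField template is a simplified stand-in for the one in src/utils.h, and DeletedField is omitted, but the shifts and widths mirror the declarations above exactly:

    #include <assert.h>
    #include <stdint.h>

    // Simplified stand-in for V8's BitField<T, shift, size> (src/utils.h).
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1U << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    enum PropertyType { NORMAL = 0, FIELD = 1 };         // abbreviated
    enum PropertyAttributes { NONE = 0, DONT_ENUM = 2 };  // abbreviated

    // Same layout as PropertyDetails: type in bits 0-3, attributes in
    // bits 4-6, storage index in bits 8 and up.
    typedef BitField<PropertyType, 0, 4> TypeField;
    typedef BitField<PropertyAttributes, 4, 3> AttributesField;
    typedef BitField<uint32_t, 8, 32 - 8> StorageField;

    int main() {
      uint32_t value = TypeField::encode(FIELD)
          | AttributesField::encode(DONT_ENUM)
          | StorageField::encode(7);
      assert(TypeField::decode(value) == FIELD);
      assert(AttributesField::decode(value) == DONT_ENUM);
      assert(StorageField::decode(value) == 7u);
      return 0;
    }
)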
diff --git a/src/property.cc b/src/property.cc
index 7cc2df5..6e043e2 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -31,6 +31,15 @@
 namespace internal {
 
 
+void LookupResult::Iterate(ObjectVisitor* visitor) {
+  LookupResult* current = this;  // Could be NULL.
+  while (current != NULL) {
+    visitor->VisitPointer(BitCast<Object**>(&current->holder_));
+    current = current->next_;
+  }
+}
+
+
 #ifdef OBJECT_PRINT
 void LookupResult::Print(FILE* out) {
   if (!IsFound()) {
diff --git a/src/property.h b/src/property.h
index e7d9fc5..3203dd1 100644
--- a/src/property.h
+++ b/src/property.h
@@ -115,11 +115,9 @@
 class ElementsTransitionDescriptor: public Descriptor {
  public:
   ElementsTransitionDescriptor(String* key,
-                               Map* map,
-                               ElementsKind elements_kind)
-      : Descriptor(key, map, PropertyDetails(NONE,
-                                             ELEMENTS_TRANSITION,
-                                             elements_kind)) { }
+                               Object* map_or_array)
+      : Descriptor(key, map_or_array, PropertyDetails(NONE,
+                                                      ELEMENTS_TRANSITION)) { }
 };
 
 // Marks a field name in a map so that adding the field is guaranteed
@@ -166,10 +164,20 @@
 
 class LookupResult BASE_EMBEDDED {
  public:
-  LookupResult()
-      : lookup_type_(NOT_FOUND),
+  explicit LookupResult(Isolate* isolate)
+      : isolate_(isolate),
+        next_(isolate->top_lookup_result()),
+        lookup_type_(NOT_FOUND),
+        holder_(NULL),
         cacheable_(true),
-        details_(NONE, NORMAL) {}
+        details_(NONE, NORMAL) {
+    isolate->SetTopLookupResult(this);
+  }
+
+  ~LookupResult() {
+    ASSERT(isolate_->top_lookup_result() == this);
+    isolate_->SetTopLookupResult(next_);
+  }
 
   void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
     lookup_type_ = DESCRIPTOR_TYPE;
@@ -202,9 +210,9 @@
     number_ = entry;
   }
 
-  void HandlerResult() {
+  void HandlerResult(JSProxy* proxy) {
     lookup_type_ = HANDLER_TYPE;
-    holder_ = NULL;
+    holder_ = proxy;
     details_ = PropertyDetails(NONE, HANDLER);
     cacheable_ = false;
   }
@@ -217,11 +225,17 @@
 
   void NotFound() {
     lookup_type_ = NOT_FOUND;
+    holder_ = NULL;
   }
 
   JSObject* holder() {
     ASSERT(IsFound());
-    return holder_;
+    return JSObject::cast(holder_);
+  }
+
+  JSProxy* proxy() {
+    ASSERT(IsFound());
+    return JSProxy::cast(holder_);
   }
 
   PropertyType type() {
@@ -248,7 +262,7 @@
   // Is the result a property, excluding transitions and the null
   // descriptor?
   bool IsProperty() {
-    return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
+    return IsFound() && GetPropertyDetails().IsProperty();
   }
 
   // Is the result a property or a transition?
@@ -278,10 +292,10 @@
     }
   }
 
+
   Map* GetTransitionMap() {
     ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
-    ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
-           type() == ELEMENTS_TRANSITION);
+    ASSERT(IsTransitionType(type()));
     return Map::cast(GetValue());
   }
 
@@ -343,7 +357,12 @@
     return holder()->GetNormalizedProperty(this);
   }
 
+  void Iterate(ObjectVisitor* visitor);
+
  private:
+  Isolate* isolate_;
+  LookupResult* next_;
+
   // Where did we find the result?
   enum {
     NOT_FOUND,
@@ -354,7 +373,7 @@
     CONSTANT_TYPE
   } lookup_type_;
 
-  JSObject* holder_;
+  JSReceiver* holder_;
   int number_;
   bool cacheable_;
   PropertyDetails details_;
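(The constructor/destructor pair added above threads every live LookupResult into a per-isolate linked list, so the garbage collector can reach each raw holder_ slot on the C++ stack and visit it via Iterate in property.cc. A reduced sketch of the RAII push/pop discipline, with Isolate trimmed to the single new slot; only the names match the diff, the bodies here are assumptions:

    #include <assert.h>
    #include <stddef.h>

    struct LookupResult;

    // Stand-in for the isolate slot this change introduces; only the two
    // accessors used by the diff are modeled.
    struct Isolate {
      Isolate() : top_lookup_result_(NULL) {}
      LookupResult* top_lookup_result() { return top_lookup_result_; }
      void SetTopLookupResult(LookupResult* top) { top_lookup_result_ = top; }
      LookupResult* top_lookup_result_;
    };

    // Each stack-allocated LookupResult pushes itself onto the isolate's
    // list in its constructor and pops itself in its destructor, so the
    // list always mirrors the C++ stack; the GC can then walk next_ links
    // and visit every holder_ slot (as Iterate does in property.cc).
    struct LookupResult {
      explicit LookupResult(Isolate* isolate)
          : isolate_(isolate), next_(isolate->top_lookup_result()) {
        isolate->SetTopLookupResult(this);
      }
      ~LookupResult() {
        assert(isolate_->top_lookup_result() == this);
        isolate_->SetTopLookupResult(next_);
      }
      Isolate* isolate_;
      LookupResult* next_;
    };

    int main() {
      Isolate isolate;
      {
        LookupResult outer(&isolate);
        LookupResult inner(&isolate);
        assert(isolate.top_lookup_result() == &inner);
        assert(inner.next_ == &outer);
      }  // destructors run in reverse order, unwinding the list
      assert(isolate.top_lookup_result() == NULL);
      return 0;
    }
)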
diff --git a/src/proxy.js b/src/proxy.js
index 4e44cd4..3cd467f 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -32,7 +32,10 @@
 $Proxy.create = function(handler, proto) {
   if (!IS_SPEC_OBJECT(handler))
     throw MakeTypeError("handler_non_object", ["create"])
-  if (!IS_SPEC_OBJECT(proto)) proto = null  // Mozilla does this...
+  if (IS_UNDEFINED(proto))
+    proto = null
+  else if (!(IS_SPEC_OBJECT(proto) || proto === null))
+    throw MakeTypeError("proto_non_object", ["create"])
   return %CreateJSProxy(handler, proto)
 }
 
@@ -42,8 +45,14 @@
   if (!IS_SPEC_FUNCTION(callTrap))
     throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
   if (IS_UNDEFINED(constructTrap)) {
-    constructTrap = callTrap
-  } else if (!IS_SPEC_FUNCTION(constructTrap)) {
+    constructTrap = DerivedConstructTrap(callTrap)
+  } else if (IS_SPEC_FUNCTION(constructTrap)) {
+    // Make sure the trap receives 'undefined' as this.
+    var construct = constructTrap
+    constructTrap = function() {
+      return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
+    }
+  } else {
     throw MakeTypeError("trap_function_expected",
                         ["createFunction", "construct"])
   }
@@ -57,6 +66,17 @@
 // Builtins
 ////////////////////////////////////////////////////////////////////////////////
 
+function DerivedConstructTrap(callTrap) {
+  return function() {
+    var proto = this.prototype
+    if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+    var obj = new $Object()
+    obj.__proto__ = proto
+    var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
+    return IS_SPEC_OBJECT(result) ? result : obj
+  }
+}
+
 function DelegateCallAndConstruct(callTrap, constructTrap) {
   return function() {
     return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
@@ -136,9 +156,32 @@
   var enumerableNames = []
   for (var i = 0, count = 0; i < names.length; ++i) {
     var name = names[i]
-    if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
+    var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
+    if (!IS_UNDEFINED(desc) && desc.enumerable) {
       enumerableNames[count++] = names[i]
     }
   }
   return enumerableNames
 }
+
+function DerivedEnumerateTrap() {
+  var names = this.getPropertyNames()
+  var enumerableNames = []
+  for (var i = 0, count = 0; i < names.length; ++i) {
+    var name = names[i]
+    var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
+    if (!IS_UNDEFINED(desc) && desc.enumerable) {
+      enumerableNames[count++] = names[i]
+    }
+  }
+  return enumerableNames
+}
+
+function ProxyEnumerate(proxy) {
+  var handler = %GetHandler(proxy)
+  if (IS_UNDEFINED(handler.enumerate)) {
+    return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
+  } else {
+    return ToStringArray(handler.enumerate(), "enumerate")
+  }
+}
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index b32d71d..f843278 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@
     RegExpMacroAssembler* assembler) :
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 4);
-  const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
+  ASSERT(type < 5);
+  const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
 
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index f91ea93..99f3a37 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -81,7 +81,7 @@
   if (subject->IsAsciiRepresentation()) {
     const byte* address;
     if (StringShape(subject).IsExternal()) {
-      const char* data = ExternalAsciiString::cast(subject)->resource()->data();
+      const char* data = ExternalAsciiString::cast(subject)->GetChars();
       address = reinterpret_cast<const byte*>(data);
     } else {
       ASSERT(subject->IsSeqAsciiString());
@@ -92,7 +92,7 @@
   }
   const uc16* data;
   if (StringShape(subject).IsExternal()) {
-    data = ExternalTwoByteString::cast(subject)->resource()->data();
+    data = ExternalTwoByteString::cast(subject)->GetChars();
   } else {
     ASSERT(subject->IsSeqTwoByteString());
     data = SeqTwoByteString::cast(subject)->GetChars();
diff --git a/src/regexp.js b/src/regexp.js
index 38d4496..596c185 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -95,12 +95,11 @@
   }
 }
 
-
 // Deprecated RegExp.prototype.compile method.  We behave as if the constructor
 // were called again.  In SpiderMonkey, this method returns the regexp object.
 // In JSC, it returns undefined.  For compatibility with JSC, we match their
 // behavior.
-function CompileRegExp(pattern, flags) {
+function RegExpCompile(pattern, flags) {
   // Both JSC and SpiderMonkey treat a missing pattern argument as the
   // empty subject string, and an actual undefined value passed as the
   // pattern as the string 'undefined'.  Note that JSC is inconsistent
@@ -108,6 +107,11 @@
   // RegExp.prototype.compile and in the constructor, where they are
   // the empty string.  For compatibility with JSC, we match their
   // behavior.
+  if (this == $RegExp.prototype) {
+    // We don't allow recompiling RegExp.prototype.
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['RegExp.prototype.compile', this]);
+  }
   if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
     DoConstructRegExp(this, 'undefined', flags);
   } else {
@@ -170,13 +174,6 @@
                         ['RegExp.prototype.exec', this]);
   }
 
-  if (%_ArgumentsLength() === 0) {
-    var regExpInput = LAST_INPUT(lastMatchInfo);
-    if (IS_UNDEFINED(regExpInput)) {
-      throw MakeError('no_input_to_regexp', [this]);
-    }
-    string = regExpInput;
-  }
   string = TO_STRING_INLINE(string);
   var lastIndex = this.lastIndex;
 
@@ -225,14 +222,6 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['RegExp.prototype.test', this]);
   }
-  if (%_ArgumentsLength() == 0) {
-    var regExpInput = LAST_INPUT(lastMatchInfo);
-    if (IS_UNDEFINED(regExpInput)) {
-      throw MakeError('no_input_to_regexp', [this]);
-    }
-    string = regExpInput;
-  }
-
   string = TO_STRING_INLINE(string);
 
   var lastIndex = this.lastIndex;
@@ -408,7 +397,6 @@
 function SetUpRegExp() {
   %CheckIsBootstrapping();
   %FunctionSetInstanceClassName($RegExp, 'RegExp');
-  %FunctionSetPrototype($RegExp, new $Object());
   %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
   %SetCode($RegExp, RegExpConstructor);
 
@@ -416,7 +404,7 @@
     "exec", RegExpExec,
     "test", RegExpTest,
     "toString", RegExpToString,
-    "compile", CompileRegExp
+    "compile", RegExpCompile
   ));
 
   // The length of compile is 1 in SpiderMonkey.
@@ -431,14 +419,18 @@
   }
   function RegExpSetInput(string) {
     LAST_INPUT(lastMatchInfo) = ToString(string);
-  };
+  }
 
   %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
   %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
-  %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput,
+                  DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput,
+                  DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput,
+                  DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput,
+                  DONT_ENUM | DONT_DELETE);
 
   // The properties multiline and $* are aliases for each other.  When this
   // value is set in SpiderMonkey, the value it is set to is coerced to a
@@ -449,38 +441,51 @@
 
   // Getter and setter for multiline.
   var multiline = false;
-  function RegExpGetMultiline() { return multiline; };
-  function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
+  function RegExpGetMultiline() { return multiline; }
+  function RegExpSetMultiline(flag) { multiline = flag ? true : false; }
 
-  %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
-  %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
-  %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline,
+                  DONT_DELETE);
+  %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline,
+                  DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline,
+                  DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline,
+                  DONT_ENUM | DONT_DELETE);
 
 
   function NoOpSetter(ignored) {}
 
 
   // Static properties set by a successful match.
-  %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch,
+                  DONT_DELETE);
   %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch,
+                  DONT_ENUM | DONT_DELETE);
   %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen,
+                  DONT_DELETE);
   %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen,
+                  DONT_ENUM | DONT_DELETE);
   %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext,
+                  DONT_DELETE);
   %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext,
+                  DONT_ENUM | DONT_DELETE);
   %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext,
+                  DONT_DELETE);
   %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
-  %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext,
+                  DONT_ENUM | DONT_DELETE);
   %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
 
   for (var i = 1; i < 10; ++i) {
-    %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
+    %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i),
+                    DONT_DELETE);
     %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
   }
 }
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 3d4c2dc..a70cd82 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -236,10 +236,22 @@
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
+      ASSERT(function->end_position() != RelocInfo::kNoPosition);
       Isolate* isolate = info->isolate();
       Zone* zone = isolate->zone();
-      VariableProxy* result_proxy = new(zone) VariableProxy(isolate, result);
-      body->Add(new(zone) ReturnStatement(result_proxy));
+      // Set the position of the assignment statement one character past the
+      // source code, such that it definitely is not in the source code range
+      // of an immediate inner scope. For example in
+      //   eval('with ({x:1}) x = 1');
+      // the end position of the function generated for executing the eval code
+      // coincides with the end of the with scope which is the position of '1'.
+      int position = function->end_position();
+      VariableProxy* result_proxy = new(zone) VariableProxy(
+          isolate, result->name(), false, position);
+      result_proxy->BindTo(result);
+      Statement* result_statement = new(zone) ReturnStatement(result_proxy);
+      result_statement->set_statement_pos(position);
+      body->Add(result_statement);
     }
   }
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 26d8846..9837ce7 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -35,6 +35,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "mark-compact.h"
 #include "platform.h"
 #include "scopeinfo.h"
@@ -135,14 +136,13 @@
   // Get the stack check stub code object to match against.  We aren't
   // prepared to generate it, but we don't expect to have to.
   StackCheckStub check_stub;
-  Object* check_code;
-  MaybeObject* maybe_check_code = check_stub.TryGetCode();
-  if (maybe_check_code->ToObject(&check_code)) {
+  Code* stack_check_code = NULL;
+  if (check_stub.FindCodeInCache(&stack_check_code)) {
     Code* replacement_code =
         isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
     Code* unoptimized_code = shared->code();
     Deoptimizer::PatchStackCheckCode(unoptimized_code,
-                                     Code::cast(check_code),
+                                     stack_check_code,
                                      replacement_code);
   }
 }
@@ -207,7 +207,8 @@
       }
     }
 
-    if (function->IsMarkedForLazyRecompilation()) {
+    if (function->IsMarkedForLazyRecompilation() &&
+        function->shared()->code()->kind() == Code::FUNCTION) {
       Code* unoptimized = function->shared()->code();
       int nesting = unoptimized->allow_osr_at_loop_nesting_level();
       if (nesting == 0) AttemptOnStackReplacement(function);
@@ -338,7 +339,8 @@
 void RuntimeProfiler::RemoveDeadSamples() {
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* function = sampler_window_[i];
-    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+    if (function != NULL &&
+        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
       sampler_window_[i] = NULL;
     }
   }
diff --git a/src/runtime.cc b/src/runtime.cc
index b1c4c10..a2e569b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -42,6 +42,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "jsregexp.h"
 #include "json-parser.h"
 #include "liveedit.h"
@@ -105,6 +106,27 @@
   type name = NumberTo##Type(obj);
 
 
+// Assert that the given argument has a valid value for a StrictModeFlag
+// and store it in a StrictModeFlag variable with the given name.
+#define CONVERT_STRICT_MODE_ARG(name, index)                         \
+  ASSERT(args[index]->IsSmi());                                      \
+  ASSERT(args.smi_at(index) == kStrictMode ||                        \
+         args.smi_at(index) == kNonStrictMode);                      \
+  StrictModeFlag name =                                              \
+      static_cast<StrictModeFlag>(args.smi_at(index));
+
+
+// Assert that the given argument has a valid value for a LanguageMode
+// and store it in a LanguageMode variable with the given name.
+#define CONVERT_LANGUAGE_MODE_ARG(name, index)                       \
+  ASSERT(args[index]->IsSmi());                                      \
+  ASSERT(args.smi_at(index) == CLASSIC_MODE ||                       \
+         args.smi_at(index) == STRICT_MODE ||                        \
+         args.smi_at(index) == EXTENDED_MODE);                       \
+  LanguageMode name =                                                \
+      static_cast<LanguageMode>(args.smi_at(index));
+
+
 MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
                                                    JSObject* boilerplate) {
   StackLimitCheck check(isolate);
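(A usage sketch for the new conversion macro: validate the raw smi payload, convert it to LanguageMode, then collapse it to the two-valued StrictModeFlag the way the later hunks do. Everything outside the macro body is a simplified stand-in, not V8 API, and the enum values are assumptions:

    #include <assert.h>

    // Stand-ins for the enums in v8globals.h at this revision.
    enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };
    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };

    // Simplified analogue of CONVERT_LANGUAGE_MODE_ARG: assert the raw
    // value is a valid LanguageMode, then bind it to a typed local.
    #define CONVERT_LANGUAGE_MODE_ARG(name, value)                   \
      assert((value) == CLASSIC_MODE || (value) == STRICT_MODE ||    \
             (value) == EXTENDED_MODE);                              \
      LanguageMode name = static_cast<LanguageMode>(value);

    int main() {
      int raw = 1;  // what args.smi_at(index) would yield
      CONVERT_LANGUAGE_MODE_ARG(language_mode, raw);
      // Both strict and extended mode collapse to kStrictMode, as in
      // the DeclareGlobals and InitializeVarGlobal hunks below.
      StrictModeFlag strict_mode_flag =
          (language_mode == CLASSIC_MODE) ? kNonStrictMode : kStrictMode;
      assert(strict_mode_flag == kStrictMode);
      return 0;
    }
)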
@@ -177,6 +199,7 @@
   // Pixel elements cannot be created using an object literal.
   ASSERT(!copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
       if (elements->map() == heap->fixed_cow_array_map()) {
@@ -189,6 +212,9 @@
       } else {
         for (int i = 0; i < elements->length(); i++) {
           Object* value = elements->get(i);
+          ASSERT(value->IsSmi() ||
+                 value->IsTheHole() ||
+                 (copy->GetElementsKind() == FAST_ELEMENTS));
           if (value->IsJSObject()) {
             JSObject* js_object = JSObject::cast(value);
             { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -202,7 +228,7 @@
       break;
     }
     case DICTIONARY_ELEMENTS: {
-      SeededNumberDictionary* element_dictionary = copy->element_dictionary();
+      NumberDictionary* element_dictionary = copy->element_dictionary();
       int capacity = element_dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
         Object* k = element_dictionary->KeyAt(i);
@@ -240,18 +266,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
-  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
-  return DeepCopyBoilerplate(isolate, boilerplate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
-  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
-  return isolate->heap()->CopyJSObject(boilerplate);
-}
-
-
 static Handle<Map> ComputeObjectLiteralMap(
     Handle<Context> context,
     Handle<FixedArray> constant_properties,
@@ -417,6 +431,9 @@
 }
 
 
+static const int kSmiOnlyLiteralMinimumLength = 1024;
+
+
 static Handle<Object> CreateArrayLiteralBoilerplate(
     Isolate* isolate,
     Handle<FixedArray> literals,
@@ -424,37 +441,77 @@
   // Create the JSArray.
   Handle<JSFunction> constructor(
       JSFunction::GlobalContextFromLiterals(*literals)->array_function());
-  Handle<Object> object = isolate->factory()->NewJSObject(constructor);
+  Handle<JSArray> object =
+      Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
 
-  const bool is_cow =
-      (elements->map() == isolate->heap()->fixed_cow_array_map());
-  Handle<FixedArray> copied_elements =
-      is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(elements->get(1)));
 
-  Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
-  if (is_cow) {
-#ifdef DEBUG
-    // Copy-on-write arrays must be shallow (and simple).
-    for (int i = 0; i < content->length(); i++) {
-      ASSERT(!content->get(i)->IsFixedArray());
-    }
-#endif
+  ASSERT(FLAG_smi_only_arrays || constant_elements_kind == FAST_ELEMENTS ||
+         constant_elements_kind == FAST_SMI_ONLY_ELEMENTS);
+  bool allow_literal_kind_transition = FLAG_smi_only_arrays &&
+      constant_elements_kind > object->GetElementsKind();
+
+  if (!FLAG_smi_only_arrays &&
+      constant_elements_values->length() > kSmiOnlyLiteralMinimumLength &&
+      constant_elements_kind != object->GetElementsKind()) {
+    allow_literal_kind_transition = true;
+  }
+
+  // If the ElementsKind of the constant values of the array literal is less
+  // specific than the ElementsKind of the boilerplate array object, change the
+  // boilerplate array object's map to reflect that kind.
+  if (allow_literal_kind_transition) {
+    Handle<Map> transitioned_array_map =
+        isolate->factory()->GetElementsTransitionMap(object,
+                                                     constant_elements_kind);
+    object->set_map(*transitioned_array_map);
+  }
+
+  Handle<FixedArrayBase> copied_elements_values;
+  if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
+    ASSERT(FLAG_smi_only_arrays);
+    copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
+        Handle<FixedDoubleArray>::cast(constant_elements_values));
   } else {
-    for (int i = 0; i < content->length(); i++) {
-      if (content->get(i)->IsFixedArray()) {
-        // The value contains the constant_properties of a
-        // simple object or array literal.
-        Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
-        Handle<Object> result =
-            CreateLiteralBoilerplate(isolate, literals, fa);
-        if (result.is_null()) return result;
-        content->set(i, *result);
+    ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           constant_elements_kind == FAST_ELEMENTS);
+    const bool is_cow =
+        (constant_elements_values->map() ==
+         isolate->heap()->fixed_cow_array_map());
+    if (is_cow) {
+      copied_elements_values = constant_elements_values;
+#ifdef DEBUG
+      Handle<FixedArray> fixed_array_values =
+          Handle<FixedArray>::cast(copied_elements_values);
+      for (int i = 0; i < fixed_array_values->length(); i++) {
+        ASSERT(!fixed_array_values->get(i)->IsFixedArray());
+      }
+#endif
+    } else {
+      Handle<FixedArray> fixed_array_values =
+          Handle<FixedArray>::cast(constant_elements_values);
+      Handle<FixedArray> fixed_array_values_copy =
+          isolate->factory()->CopyFixedArray(fixed_array_values);
+      copied_elements_values = fixed_array_values_copy;
+      for (int i = 0; i < fixed_array_values->length(); i++) {
+        Object* current = fixed_array_values->get(i);
+        if (current->IsFixedArray()) {
+          // The value contains the constant_properties of a
+          // simple object or array literal.
+          Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
+          Handle<Object> result =
+              CreateLiteralBoilerplate(isolate, literals, fa);
+          if (result.is_null()) return result;
+          fixed_array_values_copy->set(i, *result);
+        }
       }
     }
   }
-
-  // Set the elements.
-  Handle<JSArray>::cast(object)->SetContent(*content);
+  object->set_elements(*copied_elements_values);
+  object->set_length(Smi::FromInt(copied_elements_values->length()));
   return object;
 }
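(The transition check above compares ElementsKind values with a plain `>`, relying on less specific kinds having larger enum values, e.g. FAST_ELEMENTS versus a FAST_SMI_ONLY boilerplate. A toy illustration under that assumed ordering; the enum values are an assumption from v8globals.h, abbreviated to the two kinds the comparison in this hunk cares about:

    #include <assert.h>

    // Assumed (abbreviated) ordering: the more general kind compares
    // greater, so `constants > boilerplate` signals a needed transition.
    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS = 0,
      FAST_ELEMENTS = 1
    };

    static bool NeedsKindTransition(ElementsKind constants,
                                    ElementsKind boilerplate) {
      return constants > boilerplate;
    }

    int main() {
      // A smi-only boilerplate holding general object constants must
      // transition; the reverse direction never does.
      assert(NeedsKindTransition(FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS));
      assert(!NeedsKindTransition(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
      return 0;
    }
)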
 
@@ -487,28 +544,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
-  // Takes a FixedArray of elements containing the literal elements of
-  // the array literal and produces JSArray with those elements.
-  // Additionally takes the literals array of the surrounding function
-  // which contains the context from which to get the Array function
-  // to use for creating the array literal.
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
-  Handle<Object> object =
-      CreateArrayLiteralBoilerplate(isolate, literals, elements);
-  if (object.is_null()) return Failure::Exception();
-
-  // Update the functions literal and return the boilerplate.
-  literals->set(literals_index, *object);
-  return *object;
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
@@ -669,6 +704,82 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSSet, holder, 0);
+  Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0);
+  holder->set_table(*table);
+  return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSSet, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+  table = ObjectHashSetAdd(table, key);
+  holder->set_table(*table);
+  return isolate->heap()->undefined_symbol();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSSet, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+  return isolate->heap()->ToBoolean(table->Contains(*key));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSSet, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
+  table = ObjectHashSetRemove(table, key);
+  holder->set_table(*table);
+  return isolate->heap()->undefined_symbol();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSMap, holder, 0);
+  Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
+  holder->set_table(*table);
+  return *holder;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSMap, holder, 0);
+  Handle<Object> key(args[1]);
+  return ObjectHashTable::cast(holder->table())->Lookup(*key);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(JSMap, holder, 0);
+  Handle<Object> key(args[1]);
+  Handle<Object> value(args[2]);
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+  Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+  holder->set_table(*new_table);
+  return *value;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -685,10 +796,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
-  // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
-  // because they cannot be cast to JSObject to get an identity hash code.
-  CONVERT_ARG_CHECKED(JSObject, key, 1);
-  return weakmap->table()->Lookup(*key);
+  CONVERT_ARG_CHECKED(JSReceiver, key, 1);
+  return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
 }
 
 
@@ -696,10 +805,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
-  // TODO(mstarzinger): See Runtime_WeakMapGet above.
-  CONVERT_ARG_CHECKED(JSObject, key, 1);
+  CONVERT_ARG_CHECKED(JSReceiver, key, 1);
   Handle<Object> value(args[2]);
-  Handle<ObjectHashTable> table(weakmap->table());
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
   Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
   weakmap->set_table(*new_table);
   return *value;
@@ -752,49 +860,6 @@
 }
 
 
-// Inserts an object as the hidden prototype of another object.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSObject, jsobject, args[0]);
-  CONVERT_CHECKED(JSObject, proto, args[1]);
-
-  // Sanity checks.  The old prototype (that we are replacing) could
-  // theoretically be null, but if it is not null then check that we
-  // didn't already install a hidden prototype here.
-  RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
-    !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
-  RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
-
-  // Allocate up front before we start altering state in case we get a GC.
-  Object* map_or_failure;
-  { MaybeObject* maybe_map_or_failure = proto->map()->CopyDropTransitions();
-    if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
-      return maybe_map_or_failure;
-    }
-  }
-  Map* new_proto_map = Map::cast(map_or_failure);
-
-  { MaybeObject* maybe_map_or_failure = jsobject->map()->CopyDropTransitions();
-    if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
-      return maybe_map_or_failure;
-    }
-  }
-  Map* new_map = Map::cast(map_or_failure);
-
-  // Set proto's prototype to be the old prototype of the object.
-  new_proto_map->set_prototype(jsobject->GetPrototype());
-  proto->set_map(new_proto_map);
-  new_proto_map->set_is_hidden_prototype();
-
-  // Set the object's prototype to proto.
-  new_map->set_prototype(proto);
-  jsobject->set_map(new_map);
-
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
@@ -929,7 +994,7 @@
   HandleScope scope(isolate);
   Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
   Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
-  LookupResult result;
+  LookupResult result(isolate);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
   CONVERT_ARG_CHECKED(String, name, 1);
 
@@ -960,7 +1025,7 @@
       case JSObject::INTERCEPTED_ELEMENT:
       case JSObject::FAST_ELEMENT: {
         elms->set(IS_ACCESSOR_INDEX, heap->false_value());
-        Handle<Object> value = GetElement(obj, index);
+        Handle<Object> value = Object::GetElement(obj, index);
         RETURN_IF_EMPTY_HANDLE(isolate, value);
         elms->set(VALUE_INDEX, *value);
         elms->set(WRITABLE_INDEX, heap->true_value());
@@ -978,14 +1043,14 @@
           holder = Handle<JSObject>(JSObject::cast(proto));
         }
         FixedArray* elements = FixedArray::cast(holder->elements());
-        SeededNumberDictionary* dictionary = NULL;
+        NumberDictionary* dictionary = NULL;
         if (elements->map() == heap->non_strict_arguments_elements_map()) {
-          dictionary = SeededNumberDictionary::cast(elements->get(1));
+          dictionary = NumberDictionary::cast(elements->get(1));
         } else {
-          dictionary = SeededNumberDictionary::cast(elements);
+          dictionary = NumberDictionary::cast(elements);
         }
         int entry = dictionary->FindEntry(index);
-        ASSERT(entry != SeededNumberDictionary::kNotFound);
+        ASSERT(entry != NumberDictionary::kNotFound);
         PropertyDetails details = dictionary->DetailsAt(entry);
         switch (details.type()) {
           case CALLBACKS: {
@@ -1004,7 +1069,7 @@
           case NORMAL: {
             // This is a data property.
             elms->set(IS_ACCESSOR_INDEX, heap->false_value());
-            Handle<Object> value = GetElement(obj, index);
+            Handle<Object> value = Object::GetElement(obj, index);
             ASSERT(!value.is_null());
             elms->set(VALUE_INDEX, *value);
             elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
@@ -1208,49 +1273,20 @@
     if (value->IsUndefined() || is_const_property) {
       // Lookup the property in the global object, and don't set the
       // value of the variable if the property is already there.
-      LookupResult lookup;
+      LookupResult lookup(isolate);
       global->Lookup(*name, &lookup);
       if (lookup.IsProperty()) {
-        // Determine if the property is local by comparing the holder
-        // against the global object. The information will be used to
-        // avoid throwing re-declaration errors when declaring
-        // variables or constants that exist in the prototype chain.
-        bool is_local = (*global == lookup.holder());
-        // Get the property attributes and determine if the property is
-        // read-only.
-        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
-        bool is_read_only = (attributes & READ_ONLY) != 0;
-        if (lookup.type() == INTERCEPTOR) {
-          // If the interceptor says the property is there, we
-          // just return undefined without overwriting the property.
-          // Otherwise, we continue to setting the property.
-          if (attributes != ABSENT) {
-            // Check if the existing property conflicts with regards to const.
-            if (is_local && (is_read_only || is_const_property)) {
-              const char* type = (is_read_only) ? "const" : "var";
-              return ThrowRedeclarationError(isolate, type, name);
-            };
-            // The property already exists without conflicting: Go to
-            // the next declaration.
-            continue;
-          }
-          // Fall-through and introduce the absent property by using
-          // SetProperty.
-        } else {
-          // For const properties, we treat a callback with this name
-          // even in the prototype as a conflicting declaration.
-          if (is_const_property && (lookup.type() == CALLBACKS)) {
-            return ThrowRedeclarationError(isolate, "const", name);
-          }
-          // Otherwise, we check for locally conflicting declarations.
-          if (is_local && (is_read_only || is_const_property)) {
-            const char* type = (is_read_only) ? "const" : "var";
-            return ThrowRedeclarationError(isolate, type, name);
-          }
-          // The property already exists without conflicting: Go to
-          // the next declaration.
+        // We found an existing property. Unless it was an interceptor
+        // that claims the property is absent, skip this declaration.
+        if (lookup.type() != INTERCEPTOR) {
           continue;
         }
+        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
+        if (attributes != ABSENT) {
+          continue;
+        }
+        // Fall-through and introduce the absent property by using
+        // SetProperty.
       }
     } else {
       is_function_declaration = true;
@@ -1264,32 +1300,18 @@
       value = function;
     }
 
-    LookupResult lookup;
+    LookupResult lookup(isolate);
     global->LocalLookup(*name, &lookup);
 
-    // There's a local property that we need to overwrite because
-    // we're either declaring a function or there's an interceptor
-    // that claims the property is absent.
-    //
-    // Check for conflicting re-declarations. We cannot have
-    // conflicting types in case of intercepted properties because
-    // they are absent.
-    if (lookup.IsProperty() &&
-        (lookup.type() != INTERCEPTOR) &&
-        (lookup.IsReadOnly() || is_const_property)) {
-      const char* type = (lookup.IsReadOnly()) ? "const" : "var";
-      return ThrowRedeclarationError(isolate, type, name);
-    }
-
     // Compute the property attributes. According to ECMA-262, section
     // 13, page 71, the property must be read-only and
     // non-deletable. However, neither SpiderMonkey nor KJS creates the
     // property as read-only, so we don't either.
     int attr = NONE;
-    if ((flags & kDeclareGlobalsEvalFlag) == 0) {
+    if (!DeclareGlobalsEvalFlag::decode(flags)) {
       attr |= DONT_DELETE;
     }
-    bool is_native = (flags & kDeclareGlobalsNativeFlag) != 0;
+    bool is_native = DeclareGlobalsNativeFlag::decode(flags);
     if (is_const_property || (is_native && is_function_declaration)) {
       attr |= READ_ONLY;
     }
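(The decode calls above replace raw kDeclareGlobals* bitmask tests with BitField accessors over the packed smi flags argument. A sketch under an assumed packing, reusing the same simplified BitField stand-in as in the earlier note; the real definitions and exact bit positions live in runtime.h and are not shown in this diff:

    #include <assert.h>
    #include <stdint.h>

    enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value >> shift) & ((1U << size) - 1));
      }
    };

    // Assumed packing of the DeclareGlobals flags smi.
    typedef BitField<bool, 0, 1> DeclareGlobalsEvalFlag;
    typedef BitField<bool, 1, 1> DeclareGlobalsNativeFlag;
    typedef BitField<LanguageMode, 2, 2> DeclareGlobalsLanguageMode;

    int main() {
      uint32_t flags = DeclareGlobalsEvalFlag::encode(true)
          | DeclareGlobalsLanguageMode::encode(STRICT_MODE);
      assert(DeclareGlobalsEvalFlag::decode(flags));
      assert(!DeclareGlobalsNativeFlag::decode(flags));
      assert(DeclareGlobalsLanguageMode::decode(flags) == STRICT_MODE);
      return 0;
    }
)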
@@ -1314,15 +1336,15 @@
                                                               value,
                                                               attributes));
     } else {
-      StrictModeFlag strict_mode =
-          ((flags & kDeclareGlobalsStrictModeFlag) != 0) ? kStrictMode
-                                                         : kNonStrictMode;
+      LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+      StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
+          ? kNonStrictMode : kStrictMode;
       RETURN_IF_EMPTY_HANDLE(isolate,
                              SetProperty(global,
                                          name,
                                          value,
                                          static_cast<PropertyAttributes>(attr),
-                                         strict_mode));
+                                         strict_mode_flag));
     }
   }
 
@@ -1335,15 +1357,17 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
-  CONVERT_ARG_CHECKED(Context, context, 0);
+  // Declarations are always made in a function or global context.  In the
+  // case of eval code, the context passed is the context of the caller,
+  // which may be some nested context and not the declaration context.
+  RUNTIME_ASSERT(args[0]->IsContext());
+  Handle<Context> context(Context::cast(args[0])->declaration_context());
+
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
   RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3], isolate);
 
-  // Declarations are always done in a function or global context.
-  context = Handle<Context>(context->declaration_context());
-
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
@@ -1352,9 +1376,7 @@
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   if (attributes != ABSENT) {
-    // The name was declared before; check for conflicting
-    // re-declarations: This is similar to the code in parser.cc in
-    // the AstBuildingParser::Declare function.
+    // The name was declared before; check for conflicting re-declarations.
     if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
       // Functions are not read-only.
       ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1365,53 +1387,41 @@
     // Initialize it if necessary.
     if (*initial_value != NULL) {
       if (index >= 0) {
-        // The variable or constant context slot should always be in
-        // the function context or the arguments object.
-        if (holder->IsContext()) {
-          ASSERT(holder.is_identical_to(context));
-          if (((attributes & READ_ONLY) == 0) ||
-              context->get(index)->IsTheHole()) {
-            context->set(index, *initial_value);
-          }
-        } else {
-          // The holder is an arguments object.
-          Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
-          Handle<Object> result = SetElement(arguments, index, initial_value,
-                                             kNonStrictMode);
-          if (result.is_null()) return Failure::Exception();
+        ASSERT(holder.is_identical_to(context));
+        if (((attributes & READ_ONLY) == 0) ||
+            context->get(index)->IsTheHole()) {
+          context->set(index, *initial_value);
         }
       } else {
-        // Slow case: The property is not in the FixedArray part of the context.
-        Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+        // Slow case: The property is in the context extension object of a
+        // function context or the global object of a global context.
+        Handle<JSObject> object = Handle<JSObject>::cast(holder);
         RETURN_IF_EMPTY_HANDLE(
             isolate,
-            SetProperty(context_ext, name, initial_value,
-                        mode, kNonStrictMode));
+            SetProperty(object, name, initial_value, mode, kNonStrictMode));
       }
     }
 
   } else {
     // The property is not in the function context. It needs to be
-    // "declared" in the function context's extension context, or in the
-    // global context.
-    Handle<JSObject> context_ext;
+    // "declared" in the function context's extension context or as a
+    // property of the global object.
+    Handle<JSObject> object;
     if (context->has_extension()) {
-      // The function context's extension context exists - use it.
-      context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
+      object = Handle<JSObject>(JSObject::cast(context->extension()));
     } else {
-      // The function context's extension context does not exists - allocate
-      // it.
-      context_ext = isolate->factory()->NewJSObject(
+      // Context extension objects are allocated lazily.
+      ASSERT(context->IsFunctionContext());
+      object = isolate->factory()->NewJSObject(
           isolate->context_extension_function());
-      // And store it in the extension slot.
-      context->set_extension(*context_ext);
+      context->set_extension(*object);
     }
-    ASSERT(*context_ext != NULL);
+    ASSERT(*object != NULL);
 
     // Declare the property by setting it to the initial value if provided,
     // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
     // constant declarations).
-    ASSERT(!context_ext->HasLocalProperty(*name));
+    ASSERT(!object->HasLocalProperty(*name));
     Handle<Object> value(isolate->heap()->undefined_value(), isolate);
     if (*initial_value != NULL) value = initial_value;
     // Declaring a const context slot is a conflicting declaration if
@@ -1421,15 +1431,15 @@
     // SetProperty and no setters are invoked for those since they are
     // not real JSObjects.
     if (initial_value->IsTheHole() &&
-        !context_ext->IsJSContextExtensionObject()) {
-      LookupResult lookup;
-      context_ext->Lookup(*name, &lookup);
+        !object->IsJSContextExtensionObject()) {
+      LookupResult lookup(isolate);
+      object->Lookup(*name, &lookup);
       if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
         return ThrowRedeclarationError(isolate, "const", name);
       }
     }
     RETURN_IF_EMPTY_HANDLE(isolate,
-                           SetProperty(context_ext, name, value, mode,
+                           SetProperty(object, name, value, mode,
                                        kNonStrictMode));
   }
 
@@ -1440,7 +1450,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   NoHandleAllocation nha;
   // args[0] == name
-  // args[1] == strict_mode
+  // args[1] == language_mode
   // args[2] == value (optional)
 
   // Determine if we need to assign to the variable if it already
@@ -1451,8 +1461,9 @@
   CONVERT_ARG_CHECKED(String, name, 0);
   GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
-  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
+  CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
+  StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
+      ? kNonStrictMode : kStrictMode;
 
   // According to ECMA-262, section 12.2, page 62, the property must
   // not be deletable.
@@ -1465,67 +1476,35 @@
   // to assign to the property.
   // Note that objects can have hidden prototypes, so we need to traverse
   // the whole chain of hidden prototypes to do a 'local' lookup.
-  JSObject* real_holder = global;
-  LookupResult lookup;
-  while (true) {
-    real_holder->LocalLookup(*name, &lookup);
-    if (lookup.IsProperty()) {
-      // Determine if this is a redeclaration of something read-only.
-      if (lookup.IsReadOnly()) {
-        // If we found readonly property on one of hidden prototypes,
-        // just shadow it.
-        if (real_holder != isolate->context()->global()) break;
-        return ThrowRedeclarationError(isolate, "const", name);
-      }
-
-      // Determine if this is a redeclaration of an intercepted read-only
-      // property and figure out if the property exists at all.
-      bool found = true;
-      PropertyType type = lookup.type();
-      if (type == INTERCEPTOR) {
-        HandleScope handle_scope(isolate);
-        Handle<JSObject> holder(real_holder);
-        PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
-        real_holder = *holder;
-        if (intercepted == ABSENT) {
-          // The interceptor claims the property isn't there. We need to
-          // make sure to introduce it.
-          found = false;
-        } else if ((intercepted & READ_ONLY) != 0) {
-          // The property is present, but read-only. Since we're trying to
-          // overwrite it with a variable declaration we must throw a
-          // re-declaration error.  However if we found readonly property
-          // on one of hidden prototypes, just shadow it.
-          if (real_holder != isolate->context()->global()) break;
-          return ThrowRedeclarationError(isolate, "const", name);
+  Object* object = global;
+  LookupResult lookup(isolate);
+  while (object->IsJSObject() &&
+         JSObject::cast(object)->map()->is_hidden_prototype()) {
+    JSObject* raw_holder = JSObject::cast(object);
+    raw_holder->LocalLookup(*name, &lookup);
+    if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+      HandleScope handle_scope(isolate);
+      Handle<JSObject> holder(raw_holder);
+      PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+      // Update the raw pointer in case it's changed due to GC.
+      raw_holder = *holder;
+      if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+        // Found an interceptor that's not read only.
+        if (assign) {
+          return raw_holder->SetProperty(
+              &lookup, *name, args[2], attributes, strict_mode_flag);
+        } else {
+          return isolate->heap()->undefined_value();
         }
       }
-
-      if (found && !assign) {
-        // The global property is there and we're not assigning any value
-        // to it. Just return.
-        return isolate->heap()->undefined_value();
-      }
-
-      // Assign the value (or undefined) to the property.
-      Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
-      return real_holder->SetProperty(
-          &lookup, *name, value, attributes, strict_mode);
     }
-
-    Object* proto = real_holder->GetPrototype();
-    if (!proto->IsJSObject())
-      break;
-
-    if (!JSObject::cast(proto)->map()->is_hidden_prototype())
-      break;
-
-    real_holder = JSObject::cast(proto);
+    object = raw_holder->GetPrototype();
   }
 
+  // Reload global in case the loop above performed a GC.
   global = isolate->context()->global();
   if (assign) {
-    return global->SetProperty(*name, args[2], attributes, strict_mode);
+    return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
   }
   return isolate->heap()->undefined_value();
 }
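
The rewritten loop above walks the receiver chain only while the current holder's map is flagged as a hidden prototype, handling interceptors per holder and falling through to the plain global store below it. A minimal stand-alone sketch of that traversal shape (invented Obj type, not V8's API):

#include <iostream>
#include <string>

// Hypothetical stand-in for a JS object with an optional hidden prototype.
struct Obj {
  std::string name;
  bool hidden_prototype;  // A flag on the map in V8; a plain bool here.
  Obj* proto;             // Prototype link, may be null.
};

// Mirror of the loop shape: stop at the first holder that is not a
// hidden prototype.
Obj* WalkHiddenChain(Obj* object) {
  while (object != nullptr && object->hidden_prototype) {
    // In the runtime function, per-holder interceptor handling goes here.
    std::cout << "visiting hidden holder: " << object->name << '\n';
    object = object->proto;
  }
  return object;  // First non-hidden object (or null).
}

int main() {
  Obj plain{"plain", false, nullptr};
  Obj inner{"inner", true, &plain};
  Obj global{"global", true, &inner};
  WalkHiddenChain(&global);
}

The real loop additionally reloads its raw pointer after any handle-allocating call, since a GC triggered inside the interceptor query may move the holder.
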
@@ -1552,7 +1531,7 @@
   // add it as a local property even in case of callbacks in the
   // prototype chain (this rules out using SetProperty).
   // We use SetLocalPropertyIgnoreAttributes instead
-  LookupResult lookup;
+  LookupResult lookup(isolate);
   global->LocalLookup(*name, &lookup);
   if (!lookup.IsProperty()) {
     return global->SetLocalPropertyIgnoreAttributes(*name,
@@ -1560,25 +1539,9 @@
                                                     attributes);
   }
 
-  // Determine if this is a redeclaration of something not
-  // read-only. In case the result is hidden behind an interceptor we
-  // need to ask it for the property attributes.
   if (!lookup.IsReadOnly()) {
-    if (lookup.type() != INTERCEPTOR) {
-      return ThrowRedeclarationError(isolate, "var", name);
-    }
-
-    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
-
-    // Throw re-declaration error if the intercepted property is present
-    // but not read-only.
-    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
-      return ThrowRedeclarationError(isolate, "var", name);
-    }
-
     // Restore global object from context (in case of GC) and continue
-    // with setting the value because the property is either absent or
-    // read-only. We also have to do redo the lookup.
+    // with setting the value.
     HandleScope handle_scope(isolate);
     Handle<GlobalObject> global(isolate->context()->global());
 
@@ -1595,19 +1558,20 @@
     return *value;
   }
 
-  // Set the value, but only we're assigning the initial value to a
+  // Set the value, but only if we're assigning the initial value to a
   // constant. For now, we determine this by checking if the
   // current value is the hole.
-  // Strict mode handling not needed (const disallowed in strict mode).
+  // Strict mode handling not needed (const is disallowed in strict mode).
   PropertyType type = lookup.type();
   if (type == FIELD) {
     FixedArray* properties = global->properties();
     int index = lookup.GetFieldIndex();
-    if (properties->get(index)->IsTheHole()) {
+    if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
       properties->set(index, *value);
     }
   } else if (type == NORMAL) {
-    if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
+    if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
+        !lookup.IsReadOnly()) {
       global->SetNormalizedProperty(&lookup, *value);
     }
   } else {
@@ -1627,11 +1591,12 @@
 
   Handle<Object> value(args[0], isolate);
   ASSERT(!value->IsTheHole());
-  CONVERT_ARG_CHECKED(Context, context, 1);
-  Handle<String> name(String::cast(args[2]));
 
   // Initializations are always done in a function or global context.
-  context = Handle<Context>(context->declaration_context());
+  RUNTIME_ASSERT(args[1]->IsContext());
+  Handle<Context> context(Context::cast(args[1])->declaration_context());
+
+  Handle<String> name(String::cast(args[2]));
 
   int index;
   PropertyAttributes attributes;
@@ -1640,39 +1605,19 @@
   Handle<Object> holder =
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
-  // In most situations, the property introduced by the const
-  // declaration should be present in the context extension object.
-  // However, because declaration and initialization are separate, the
-  // property might have been deleted (if it was introduced by eval)
-  // before we reach the initialization point.
-  //
-  // Example:
-  //
-  //    function f() { eval("delete x; const x;"); }
-  //
-  // In that case, the initialization behaves like a normal assignment
-  // to property 'x'.
   if (index >= 0) {
-    if (holder->IsContext()) {
-      // Property was found in a context.  Perform the assignment if we
-      // found some non-constant or an uninitialized constant.
-      Handle<Context> context = Handle<Context>::cast(holder);
-      if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
-        context->set(index, *value);
-      }
-    } else {
-      // The holder is an arguments object.
-      ASSERT((attributes & READ_ONLY) == 0);
-      Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          SetElement(arguments, index, value, kNonStrictMode));
+    ASSERT(holder->IsContext());
+    // Property was found in a context.  Perform the assignment if we
+    // found some non-constant or an uninitialized constant.
+    Handle<Context> context = Handle<Context>::cast(holder);
+    if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+      context->set(index, *value);
     }
     return *value;
   }
 
-  // The property could not be found, we introduce it in the global
-  // context.
+  // The property could not be found, we introduce it as a property of the
+  // global object.
   if (attributes == ABSENT) {
     Handle<JSObject> global = Handle<JSObject>(
         isolate->context()->global());
@@ -1683,29 +1628,41 @@
     return *value;
   }
 
-  // The property was present in a context extension object.
-  Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+  // The property was present in some function's context extension object,
+  // as a property on the subject of a with, or as a property of the global
+  // object.
+  //
+  // In most situations, eval-introduced consts should still be present in
+  // the context extension object.  However, because declaration and
+  // initialization are separate, the property might have been deleted
+  // before we reach the initialization point.
+  //
+  // Example:
+  //
+  //    function f() { eval("delete x; const x;"); }
+  //
+  // In that case, the initialization behaves like a normal assignment.
+  Handle<JSObject> object = Handle<JSObject>::cast(holder);
 
-  if (*context_ext == context->extension()) {
-    // This is the property that was introduced by the const
-    // declaration.  Set it if it hasn't been set before.  NOTE: We
-    // cannot use GetProperty() to get the current value as it
-    // 'unholes' the value.
-    LookupResult lookup;
-    context_ext->LocalLookupRealNamedProperty(*name, &lookup);
+  if (*object == context->extension()) {
+    // This is the property that was introduced by the const declaration.
+    // Set it if it hasn't been set before.  NOTE: We cannot use
+    // GetProperty() to get the current value as it 'unholes' the value.
+    LookupResult lookup(isolate);
+    object->LocalLookupRealNamedProperty(*name, &lookup);
     ASSERT(lookup.IsProperty());  // the property was declared
     ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
 
     PropertyType type = lookup.type();
     if (type == FIELD) {
-      FixedArray* properties = context_ext->properties();
+      FixedArray* properties = object->properties();
       int index = lookup.GetFieldIndex();
       if (properties->get(index)->IsTheHole()) {
         properties->set(index, *value);
       }
     } else if (type == NORMAL) {
-      if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
-        context_ext->SetNormalizedProperty(&lookup, *value);
+      if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
+        object->SetNormalizedProperty(&lookup, *value);
       }
     } else {
       // We should not reach here. Any real, named property should be
@@ -1713,13 +1670,13 @@
       UNREACHABLE();
     }
   } else {
-    // The property was found in a different context extension object.
-    // Set it if it is not a read-only property.
+    // The property was found on some other object.  Set it if it is not a
+    // read-only property.
     if ((attributes & READ_ONLY) == 0) {
      // Strict mode not needed (const is disallowed in strict mode).
       RETURN_IF_EMPTY_HANDLE(
           isolate,
-          SetProperty(context_ext, name, value, attributes, kNonStrictMode));
+          SetProperty(object, name, value, attributes, kNonStrictMode));
     }
   }
 
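
The hole-based protocol above is what makes separate const declaration and initialization work: declaring stores the hole sentinel into a read-only slot, and initializing may overwrite the slot only while it is still the hole (or when the slot is not read-only at all). A toy model of that assignment rule, with an invented Slot type in place of V8's contexts:

#include <cassert>
#include <optional>

// Hypothetical context slot: read-only once declared const; the value is
// absent ("the hole") until the initializer runs.
struct Slot {
  bool read_only = false;
  std::optional<int> value;  // std::nullopt plays the role of the hole.
};

// Mirrors: if ((attributes & READ_ONLY) == 0 || slot is the hole) assign.
void InitializeConst(Slot* slot, int v) {
  if (!slot->read_only || !slot->value.has_value()) {
    slot->value = v;
  }
  // Otherwise: an already-initialized const; the store is silently dropped.
}

int main() {
  Slot x;
  x.read_only = true;      // "const x;" declares a read-only holed slot.
  InitializeConst(&x, 1);  // First initialization sticks.
  InitializeConst(&x, 2);  // Re-initialization is ignored.
  assert(x.value == 1);
}
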
@@ -1818,14 +1775,17 @@
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
     regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
-    // TODO(lrn): Consider skipping write barrier on booleans as well.
-    // Both true and false should be in oldspace at all times.
-    regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
-    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
-    regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
+    // Both true and false are immovable, immortal objects, so there is no
+    // need for a write barrier.
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
     regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                   Smi::FromInt(0),
-                                  SKIP_WRITE_BARRIER);
+                                  SKIP_WRITE_BARRIER);  // It's a Smi.
     return regexp;
   }
 
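
Skipping the barrier above is safe because a write barrier only needs to record stores of values the collector might move or might not yet have scanned; Smis and immortal immovable singletons such as true and false are neither. A schematic of the decision only, not V8's actual barrier (invented tagging and addresses):

#include <cstdint>
#include <vector>

// Hypothetical tagged value: low bit clear means Smi, else heap pointer.
using Tagged = std::uintptr_t;
inline bool IsSmi(Tagged v) { return (v & 1) == 0; }

// Stand-ins for the immortal, immovable singletons (true/false/...).
inline bool IsImmortalImmovable(Tagged v) {
  static const Tagged kTrue = 0x11, kFalse = 0x13;  // invented addresses
  return v == kTrue || v == kFalse;
}

std::vector<Tagged*> recorded_slots;  // Slots the collector must revisit.

// Record the slot unless the stored value can never need scanning.
void WriteWithBarrier(Tagged* slot, Tagged value) {
  *slot = value;
  if (IsSmi(value) || IsImmortalImmovable(value)) return;  // SKIP_WRITE_BARRIER
  recorded_slots.push_back(slot);
}

int main() {
  Tagged field = 0;
  WriteWithBarrier(&field, 42 << 1);  // Smi store: nothing recorded.
}
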
@@ -1914,7 +1874,7 @@
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSFunction, function, args[0]);
   SharedFunctionInfo* shared = function->shared();
-  if (shared->native() || shared->strict_mode()) {
+  if (shared->native() || !shared->is_classic_mode()) {
     return isolate->heap()->undefined_value();
   }
   // Returns undefined for strict or native functions, or
@@ -1994,15 +1954,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  fun->shared()->set_bound(true);
-  return isolate->heap()->undefined_value();
-}
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
@@ -2081,24 +2032,6 @@
 }
 
 
-// Creates a local, readonly, property called length with the correct
-// length (when read by the user). This effectively overwrites the
-// interceptor used to normally provide the length.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionSetLength) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  CONVERT_CHECKED(Smi, length, args[1]);
-  MaybeObject* maybe_name =
-      isolate->heap()->AllocateStringFromAscii(CStrVector("length"));
-  String* name;
-  if (!maybe_name->To(&name)) return maybe_name;
-  PropertyAttributes attr =
-      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
-  return fun->AddProperty(name, length, attr, kNonStrictMode);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -2201,13 +2134,12 @@
     Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
     Handle<SharedFunctionInfo> shared(fun->shared());
 
-    if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+    if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
       return Failure::Exception();
     }
     // Since we don't store the source for this we should never
     // optimize this.
     shared->code()->set_optimizable(false);
-
     // Set the code, scope info, formal parameter count,
     // and the length of the target function.
     target->shared()->set_code(shared->code());
@@ -2239,9 +2171,7 @@
       literals->set(JSFunction::kLiteralGlobalContextIndex,
                     context->global_context());
     }
-    // It's okay to skip the write barrier here because the literals
-    // are guaranteed to be in old space.
-    target->set_literals(*literals, SKIP_WRITE_BARRIER);
+    target->set_literals(*literals);
     target->set_next_function_link(isolate->heap()->undefined_value());
 
     if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
@@ -2325,7 +2255,8 @@
  public:
   explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
       : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
-        length_(0) {
+        length_(0),
+        has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(initial_capacity > 0);
@@ -2333,7 +2264,8 @@
 
   explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
       : array_(backing_store),
-        length_(0) {
+        length_(0),
+        has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(backing_store->length() > 0);
@@ -2361,12 +2293,15 @@
   }
 
   void Add(Object* value) {
+    ASSERT(!value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
+    has_non_smi_elements_ = true;
   }
 
   void Add(Smi* value) {
+    ASSERT(value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
@@ -2391,7 +2326,7 @@
   }
 
   Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
-    target_array->set_elements(*array_);
+    FACTORY->SetContent(target_array, array_);
     target_array->set_length(Smi::FromInt(length_));
     return target_array;
   }
@@ -2399,6 +2334,7 @@
  private:
   Handle<FixedArray> array_;
   int length_;
+  bool has_non_smi_elements_;
 };
 
 
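
The new has_non_smi_elements_ flag is maintained purely by overload resolution: Add(Smi*) leaves it clear while Add(Object*) sets it, so a finished builder can report whether a Smi-only elements kind would still suffice. The same trick in miniature, with invented Smi/HeapObject stand-ins:

#include <cassert>
#include <variant>
#include <vector>

struct Smi { int value; };        // Immediate small integer.
struct HeapObject {};             // Anything that is not a Smi.

class Builder {
 public:
  void Add(Smi v) { items_.push_back(v); }  // Stays Smi-only.
  void Add(HeapObject v) {                  // Forces the flag.
    items_.push_back(v);
    has_non_smi_elements_ = true;
  }
  bool has_non_smi_elements() const { return has_non_smi_elements_; }

 private:
  std::vector<std::variant<Smi, HeapObject>> items_;
  bool has_non_smi_elements_ = false;
};

int main() {
  Builder b;
  b.Add(Smi{1});
  assert(!b.has_non_smi_elements());
  b.Add(HeapObject{});
  assert(b.has_non_smi_elements());
}
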
@@ -2893,7 +2829,7 @@
       }
     } else {
       Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
-      if (pattern->IsAsciiRepresentation()) {
+      if (pattern_content.IsAscii()) {
         FindStringIndices(isolate,
                           subject_vector,
                           pattern_content.ToAsciiVector(),
@@ -3019,7 +2955,7 @@
 
   // Shortcut for simple non-regexp global replacements
   if (is_global &&
-      regexp->TypeTag() == JSRegExp::ATOM &&
+      regexp_handle->TypeTag() == JSRegExp::ATOM &&
       compiled_replacement.simple_hint()) {
     if (subject_handle->HasOnlyAsciiChars() &&
         replacement_handle->HasOnlyAsciiChars()) {
@@ -3242,6 +3178,9 @@
 
   Address end_of_string = answer->address() + string_size;
   isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
+  if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
+    MemoryChunk::IncrementLiveBytes(answer->address(), -delta);
+  }
 
   return *answer;
 }
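
The hunk above keeps the incremental marker's bookkeeping honest: a black (already marked) object has had its full size added to its page's live-byte count, so trimming delta bytes off its tail must subtract them again or the collector will overestimate live data. The accounting in isolation (invented Page type; the is-black query is assumed):

#include <cassert>
#include <cstddef>

struct Page {
  std::size_t live_bytes = 0;  // Bytes of already-marked (black) objects.
};

// Only black objects were counted, so only black objects need the
// correction when they shrink in place.
void OnShrink(Page* page, bool object_is_black, std::size_t delta) {
  if (object_is_black) page->live_bytes -= delta;
}

int main() {
  Page p;
  p.live_bytes = 128;
  OnShrink(&p, /*object_is_black=*/true, 16);
  assert(p.live_bytes == 112);
}
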
@@ -4001,13 +3940,13 @@
   // Slow case.
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   char* str = DoubleToRadixCString(value, radix);
   MaybeObject* result =
@@ -4023,13 +3962,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4048,13 +3987,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4073,13 +4012,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4126,11 +4065,6 @@
     return prototype->GetElement(index);
   }
 
-  return GetElement(object, index);
-}
-
-
-MaybeObject* Runtime::GetElement(Handle<Object> object, uint32_t index) {
   return object->GetElement(index);
 }
 
@@ -4187,6 +4121,23 @@
 }
 
 
+MaybeObject* TransitionElements(Handle<Object> object,
+                                ElementsKind to_kind,
+                                Isolate* isolate) {
+  HandleScope scope(isolate);
+  if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
+  ElementsKind from_kind =
+      Handle<JSObject>::cast(object)->map()->elements_kind();
+  if (Map::IsValidElementsTransition(from_kind, to_kind)) {
+    Handle<Object> result =
+        TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+    if (result.is_null()) return isolate->ThrowIllegalOperation();
+    return *result;
+  }
+  return isolate->ThrowIllegalOperation();
+}
+
+
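
TransitionElements only performs transitions that the map machinery declares valid; for the fast kinds involved here that is effectively a one-way ladder, SMI_ONLY -> DOUBLE -> OBJECT, with the direct SMI_ONLY -> OBJECT step also allowed. A deliberately simplified validity check under that assumption (the real Map::IsValidElementsTransition covers more kinds):

#include <cassert>

// Ordered so that "more general" is numerically larger.
enum ElementsKind { FAST_SMI_ONLY = 0, FAST_DOUBLE = 1, FAST_OBJECT = 2 };

// Simplified rule: a fast-kind transition is valid only toward a more
// general kind, never back.
bool IsValidTransition(ElementsKind from, ElementsKind to) {
  return to > from;
}

int main() {
  assert(IsValidTransition(FAST_SMI_ONLY, FAST_DOUBLE));
  assert(IsValidTransition(FAST_DOUBLE, FAST_OBJECT));
  assert(IsValidTransition(FAST_SMI_ONLY, FAST_OBJECT));
  assert(!IsValidTransition(FAST_OBJECT, FAST_SMI_ONLY));
}
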
 // KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
   NoHandleAllocation ha;
@@ -4203,40 +4154,63 @@
   //
   // Additionally, we need to make sure that we do not cache results
   // for objects that require access checks.
-  if (args[0]->IsJSObject() &&
-      !args[0]->IsJSGlobalProxy() &&
-      !args[0]->IsAccessCheckNeeded() &&
-      args[1]->IsString()) {
-    JSObject* receiver = JSObject::cast(args[0]);
-    String* key = String::cast(args[1]);
-    if (receiver->HasFastProperties()) {
-      // Attempt to use lookup cache.
-      Map* receiver_map = receiver->map();
-      KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
-      int offset = keyed_lookup_cache->Lookup(receiver_map, key);
-      if (offset != -1) {
-        Object* value = receiver->FastPropertyAt(offset);
-        return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
+  if (args[0]->IsJSObject()) {
+    if (!args[0]->IsJSGlobalProxy() &&
+        !args[0]->IsAccessCheckNeeded() &&
+        args[1]->IsString()) {
+      JSObject* receiver = JSObject::cast(args[0]);
+      String* key = String::cast(args[1]);
+      if (receiver->HasFastProperties()) {
+        // Attempt to use lookup cache.
+        Map* receiver_map = receiver->map();
+        KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+        int offset = keyed_lookup_cache->Lookup(receiver_map, key);
+        if (offset != -1) {
+          Object* value = receiver->FastPropertyAt(offset);
+          return value->IsTheHole()
+              ? isolate->heap()->undefined_value()
+              : value;
+        }
+        // Lookup cache miss.  Perform lookup and update the cache if
+        // appropriate.
+        LookupResult result(isolate);
+        receiver->LocalLookup(key, &result);
+        if (result.IsProperty() && result.type() == FIELD) {
+          int offset = result.GetFieldIndex();
+          keyed_lookup_cache->Update(receiver_map, key, offset);
+          return receiver->FastPropertyAt(offset);
+        }
+      } else {
+        // Attempt dictionary lookup.
+        StringDictionary* dictionary = receiver->property_dictionary();
+        int entry = dictionary->FindEntry(key);
+        if ((entry != StringDictionary::kNotFound) &&
+            (dictionary->DetailsAt(entry).type() == NORMAL)) {
+          Object* value = dictionary->ValueAt(entry);
+          if (!receiver->IsGlobalObject()) return value;
+          value = JSGlobalPropertyCell::cast(value)->value();
+          if (!value->IsTheHole()) return value;
+          // If value is the hole do the general lookup.
+        }
       }
-      // Lookup cache miss.  Perform lookup and update the cache if appropriate.
-      LookupResult result;
-      receiver->LocalLookup(key, &result);
-      if (result.IsProperty() && result.type() == FIELD) {
-        int offset = result.GetFieldIndex();
-        keyed_lookup_cache->Update(receiver_map, key, offset);
-        return receiver->FastPropertyAt(offset);
-      }
-    } else {
-      // Attempt dictionary lookup.
-      StringDictionary* dictionary = receiver->property_dictionary();
-      int entry = dictionary->FindEntry(key);
-      if ((entry != StringDictionary::kNotFound) &&
-          (dictionary->DetailsAt(entry).type() == NORMAL)) {
-        Object* value = dictionary->ValueAt(entry);
-        if (!receiver->IsGlobalObject()) return value;
-        value = JSGlobalPropertyCell::cast(value)->value();
-        if (!value->IsTheHole()) return value;
-        // If value is the hole do the general lookup.
+    } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
+      // JSObject without a string key. If the key is a Smi, check for a
+      // definite out-of-bounds access to elements, which is a strong indicator
+      // that subsequent accesses will also call the runtime. Proactively
+      // transition elements to FAST_ELEMENTS to avoid excessive boxing of
+      // doubles for those future calls in the case that the elements would
+      // become FAST_DOUBLE_ELEMENTS.
+      Handle<JSObject> js_object(args.at<JSObject>(0));
+      ElementsKind elements_kind = js_object->GetElementsKind();
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_DOUBLE_ELEMENTS) {
+        FixedArrayBase* elements = js_object->elements();
+        if (args.at<Smi>(1)->value() >= elements->length()) {
+          MaybeObject* maybe_object = TransitionElements(js_object,
+                                                         FAST_ELEMENTS,
+                                                         isolate);
+          if (maybe_object->IsFailure()) return maybe_object;
+        }
       }
     }
   } else if (args[0]->IsString() && args[1]->IsSmi()) {
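
The fast path restructured above is a (map, name) -> field-offset cache: a hit bypasses the property lookup entirely, and a miss does the lookup once and memoizes the offset for all receivers sharing that map. A minimal sketch using std::unordered_map in place of V8's fixed-size KeyedLookupCache:

#include <cstdint>
#include <string>
#include <unordered_map>

// Hypothetical stand-ins: a MapId identifies an object layout; an offset
// locates a fast property within that layout.
using MapId = std::uintptr_t;

class KeyedLookupCache {
 public:
  // Returns the cached field offset, or -1 on a miss.
  int Lookup(MapId map, const std::string& name) const {
    auto it = cache_.find(Key(map, name));
    return it == cache_.end() ? -1 : it->second;
  }
  void Update(MapId map, const std::string& name, int offset) {
    cache_[Key(map, name)] = offset;
  }

 private:
  static std::string Key(MapId map, const std::string& name) {
    return std::to_string(map) + "|" + name;
  }
  std::unordered_map<std::string, int> cache_;
};

int main() {
  KeyedLookupCache cache;
  if (cache.Lookup(0x1000, "x") == -1) cache.Update(0x1000, "x", 2);
  return cache.Lookup(0x1000, "x") == 2 ? 0 : 1;
}
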
@@ -4269,12 +4243,12 @@
   CONVERT_CHECKED(String, name, args[1]);
   CONVERT_CHECKED(Smi, flag_setter, args[2]);
   Object* fun = args[3];
-  RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+  RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
   CONVERT_CHECKED(Smi, flag_attr, args[4]);
   int unchecked = flag_attr->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   RUNTIME_ASSERT(!obj->IsNull());
-  LookupResult result;
+  LookupResult result(isolate);
   obj->LocalLookupRealNamedProperty(name, &result);
 
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
@@ -4316,11 +4290,11 @@
   uint32_t index;
   bool is_element = name->AsArrayIndex(&index);
 
-  // Special case for elements if any of the flags are true.
+  // Special case for elements if any of the flags might be involved.
   // If elements are in fast case we always implicitly assume that:
   // DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
-  if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
-      is_element) {
+  if (is_element && (attr != NONE ||
+      js_object->HasLocalElement(index) == JSObject::DICTIONARY_ELEMENT)) {
     // Normalize the elements to enable attributes on the property.
     if (js_object->IsJSGlobalProxy()) {
       // We do not need to do access checks here since these have already
@@ -4342,12 +4316,12 @@
       return isolate->Throw(*error);
     }
 
-    Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
+    Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
     // Make sure that we never go back to fast case.
     dictionary->set_requires_slow_elements();
     PropertyDetails details = PropertyDetails(attr, NORMAL);
-    Handle<SeededNumberDictionary> extended_dictionary =
-        SeededNumberDictionarySet(dictionary, index, obj_value, details);
+    Handle<NumberDictionary> extended_dictionary =
+        NumberDictionarySet(dictionary, index, obj_value, details);
     if (*extended_dictionary != *dictionary) {
       if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
         FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
@@ -4358,7 +4332,7 @@
     return *obj_value;
   }
 
-  LookupResult result;
+  LookupResult result(isolate);
   js_object->LocalLookupRealNamedProperty(*name, &result);
 
   // To be compatible with safari we do not change the value on API objects
@@ -4408,12 +4382,12 @@
                                               Handle<Object> value,
                                               PropertyAttributes attr) {
   // Normalize the elements to enable attributes on the property.
-  Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
+  Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
   // Make sure that we never go back to fast case.
   dictionary->set_requires_slow_elements();
   PropertyDetails details = PropertyDetails(attr, NORMAL);
-  Handle<SeededNumberDictionary> extended_dictionary =
-      SeededNumberDictionarySet(dictionary, index, value, details);
+  Handle<NumberDictionary> extended_dictionary =
+      NumberDictionarySet(dictionary, index, value, details);
   if (*extended_dictionary != *dictionary) {
     js_object->set_elements(*extended_dictionary);
   }
@@ -4437,6 +4411,14 @@
     return isolate->Throw(*error);
   }
 
+  if (object->IsJSProxy()) {
+    bool has_pending_exception = false;
+    Handle<Object> name = Execution::ToString(key, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    return JSProxy::cast(*object)->SetProperty(
+        String::cast(*name), *value, attr, strict_mode);
+  }
+
   // If the object isn't a JavaScript object, we ignore the store.
   if (!object->IsJSObject()) return *value;
 
@@ -4556,7 +4538,7 @@
 
   // Check if the given key is an array index.
   uint32_t index;
-  if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
+  if (key->ToArrayIndex(&index)) {
     // In Firefox/SpiderMonkey, Safari and Opera you can access the
     // characters of a string using [] notation.  In the case of a
     // String object we just need to redirect the deletion to the
@@ -4567,8 +4549,7 @@
       return isolate->heap()->true_value();
     }
 
-    return JSObject::cast(*receiver)->DeleteElement(
-        index, JSReceiver::FORCE_DELETION);
+    return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
   }
 
   Handle<String> key_string;
@@ -4603,10 +4584,8 @@
 
   StrictModeFlag strict_mode = kNonStrictMode;
   if (args.length() == 5) {
-    CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
-    RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
-                   strict_unchecked == kNonStrictMode);
-    strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+    CONVERT_STRICT_MODE_ARG(strict_mode_flag, 4);
+    strict_mode = strict_mode_flag;
   }
 
   return Runtime::SetObjectProperty(isolate,
@@ -4618,6 +4597,22 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+  Handle<Object> object = args.at<Object>(0);
+  return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+  Handle<Object> object = args.at<Object>(0);
+  return TransitionElements(object, FAST_ELEMENTS, isolate);
+}
+
+
 // Set the native flag on the function.
 // This is used to decide if we should transform null and undefined
 // into the global object when doing call and apply.
@@ -4635,6 +4630,44 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
+  RUNTIME_ASSERT(args.length() == 5);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_SMI_ARG_CHECKED(store_index, 1);
+  Handle<Object> value = args.at<Object>(2);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 3);
+  CONVERT_SMI_ARG_CHECKED(literal_index, 4);
+  HandleScope scope;
+
+  Object* raw_boilerplate_object = literals->get(literal_index);
+  Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
+#if DEBUG
+  ElementsKind elements_kind = object->GetElementsKind();
+#endif
+  ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
+  // Smis should never trigger transitions.
+  ASSERT(!value->IsSmi());
+
+  if (value->IsNumber()) {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+    ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+    FixedDoubleArray* double_array =
+        FixedDoubleArray::cast(object->elements());
+    HeapNumber* number = HeapNumber::cast(*value);
+    double_array->set(store_index, number->Number());
+  } else {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           elements_kind == FAST_DOUBLE_ELEMENTS);
+    TransitionElementsKind(object, FAST_ELEMENTS);
+    FixedArray* object_array =
+        FixedArray::cast(object->elements());
+    object_array->set(store_index, *value);
+  }
+  return *object;
+}
+
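
Runtime_StoreArrayLiteralElement is only reached when the stored value no longer fits the literal's current elements kind, so the kind is widened before storing: a heap number pushes SMI_ONLY elements to DOUBLE, anything else pushes to OBJECT elements. The widening decision in isolation (invented value tags, same simplified kind ladder as the earlier sketch):

#include <cassert>

enum ElementsKind { FAST_SMI_ONLY = 0, FAST_DOUBLE = 1, FAST_OBJECT = 2 };
enum ValueTag { SMI, HEAP_NUMBER, OTHER_HEAP_OBJECT };

// Which kind must the backing store have to hold this value?
ElementsKind RequiredKind(ValueTag v) {
  switch (v) {
    case SMI:         return FAST_SMI_ONLY;
    case HEAP_NUMBER: return FAST_DOUBLE;
    default:          return FAST_OBJECT;
  }
}

// Widen-only transition, mirroring the runtime function above.
ElementsKind KindAfterStore(ElementsKind current, ValueTag v) {
  ElementsKind needed = RequiredKind(v);
  return needed > current ? needed : current;
}

int main() {
  assert(KindAfterStore(FAST_SMI_ONLY, HEAP_NUMBER) == FAST_DOUBLE);
  assert(KindAfterStore(FAST_DOUBLE, OTHER_HEAP_OBJECT) == FAST_OBJECT);
  assert(KindAfterStore(FAST_DOUBLE, SMI) == FAST_DOUBLE);
}
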
+
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
@@ -4664,8 +4697,8 @@
 
   CONVERT_CHECKED(JSReceiver, object, args[0]);
   CONVERT_CHECKED(String, key, args[1]);
-  CONVERT_SMI_ARG_CHECKED(strict, 2);
-  return object->DeleteProperty(key, (strict == kStrictMode)
+  CONVERT_STRICT_MODE_ARG(strict_mode, 2);
+  return object->DeleteProperty(key, (strict_mode == kStrictMode)
                                       ? JSReceiver::STRICT_DELETION
                                       : JSReceiver::NORMAL_DELETION);
 }
@@ -4730,29 +4763,24 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSReceiver, receiver, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
 
-  // Only JS receivers can have properties.
-  if (args[0]->IsJSReceiver()) {
-    JSReceiver* receiver = JSReceiver::cast(args[0]);
-    CONVERT_CHECKED(String, key, args[1]);
-    if (receiver->HasProperty(key)) return isolate->heap()->true_value();
-  }
-  return isolate->heap()->false_value();
+  bool result = receiver->HasProperty(key);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  return isolate->heap()->ToBoolean(result);
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSReceiver, receiver, args[0]);
+  CONVERT_CHECKED(Smi, index, args[1]);
 
-  // Only JS objects can have elements.
-  if (args[0]->IsJSObject()) {
-    JSObject* object = JSObject::cast(args[0]);
-    CONVERT_CHECKED(Smi, index_obj, args[1]);
-    uint32_t index = index_obj->value();
-    if (object->HasElement(index)) return isolate->heap()->true_value();
-  }
-  return isolate->heap()->false_value();
+  bool result = receiver->HasElement(index->value());
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  return isolate->heap()->ToBoolean(result);
 }
 
 
@@ -4765,7 +4793,37 @@
 
   uint32_t index;
   if (key->AsArrayIndex(&index)) {
-    return isolate->heap()->ToBoolean(object->HasElement(index));
+    JSObject::LocalElementType type = object->HasLocalElement(index);
+    switch (type) {
+      case JSObject::UNDEFINED_ELEMENT:
+      case JSObject::STRING_CHARACTER_ELEMENT:
+        return isolate->heap()->false_value();
+      case JSObject::INTERCEPTED_ELEMENT:
+      case JSObject::FAST_ELEMENT:
+        return isolate->heap()->true_value();
+      case JSObject::DICTIONARY_ELEMENT: {
+        if (object->IsJSGlobalProxy()) {
+          Object* proto = object->GetPrototype();
+          if (proto->IsNull()) {
+            return isolate->heap()->false_value();
+          }
+          ASSERT(proto->IsJSGlobalObject());
+          object = JSObject::cast(proto);
+        }
+        FixedArray* elements = FixedArray::cast(object->elements());
+        NumberDictionary* dictionary = NULL;
+        if (elements->map() ==
+            isolate->heap()->non_strict_arguments_elements_map()) {
+          dictionary = NumberDictionary::cast(elements->get(1));
+        } else {
+          dictionary = NumberDictionary::cast(elements);
+        }
+        int entry = dictionary->FindEntry(index);
+        ASSERT(entry != NumberDictionary::kNotFound);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        return isolate->heap()->ToBoolean(!details.IsDontEnum());
+      }
+    }
   }
 
   PropertyAttributes att = object->GetLocalPropertyAttribute(key);
@@ -4776,8 +4834,11 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
-  return *GetKeysFor(object);
+  CONVERT_ARG_CHECKED(JSReceiver, object, 0);
+  bool threw = false;
+  Handle<JSArray> result = GetKeysFor(object, &threw);
+  if (threw) return Failure::Exception();
+  return *result;
 }
 
 
@@ -4789,14 +4850,16 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
   ASSERT(args.length() == 1);
 
-  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+  CONVERT_CHECKED(JSReceiver, raw_object, args[0]);
 
   if (raw_object->IsSimpleEnum()) return raw_object->map();
 
   HandleScope scope(isolate);
-  Handle<JSObject> object(raw_object);
-  Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
-                                                      INCLUDE_PROTOS);
+  Handle<JSReceiver> object(raw_object);
+  bool threw = false;
+  Handle<FixedArray> content =
+      GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw);
+  if (threw) return Failure::Exception();
 
   // Test again, since cache may have been built by preceding call.
   if (object->IsSimpleEnum()) return object->map();
@@ -4993,8 +5056,11 @@
     object = Handle<JSObject>::cast(proto);
   }
 
-  Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
-                                                       LOCAL_ONLY);
+  bool threw = false;
+  Handle<FixedArray> contents =
+      GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw);
+  if (threw) return Failure::Exception();
+
   // Some fast paths through GetKeysInFixedArrayFor reuse a cached
   // property array and since the result is mutable we have to create
   // a fresh clone on each invocation.
@@ -5058,7 +5124,7 @@
   if (key->Equals(isolate->heap()->callee_symbol())) {
     Object* function = frame->function();
     if (function->IsJSFunction() &&
-        JSFunction::cast(function)->shared()->strict_mode()) {
+        !JSFunction::cast(function)->shared()->is_classic_mode()) {
       return isolate->Throw(*isolate->factory()->NewTypeError(
           "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
     }
@@ -5579,7 +5645,7 @@
   StringType* new_string = StringType::cast(new_object);
 
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   *(write_cursor++) = '"';
 
@@ -5667,16 +5733,15 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
                                                   write_cursor,
                                                   characters);
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqAsciiString::kHeaderSize));
+          new_string->address() + SeqString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -5754,9 +5819,8 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   *(write_cursor++) = '[';
   for (int i = 0; i < length; i++) {
     if (i != 0) *(write_cursor++) = ',';
@@ -5777,7 +5841,7 @@
 
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqAsciiString::kHeaderSize));
+          new_string->address() + SeqString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -6146,7 +6210,7 @@
 
 
 static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
-  return unibrow::WhiteSpace::Is(c) || c == 0x200b;
+  return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
 }
 
 
@@ -6229,6 +6293,8 @@
   int part_count = indices.length();
 
   Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
+  MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
+  if (maybe_result->IsFailure()) return maybe_result;
   result->set_length(Smi::FromInt(part_count));
 
   ASSERT(result->HasFastElements());
@@ -6275,11 +6341,11 @@
   FixedArray* ascii_cache = heap->single_character_string_cache();
   Object* undefined = heap->undefined_value();
   int i;
+  WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
   for (i = 0; i < length; ++i) {
     Object* value = ascii_cache->get(chars[i]);
     if (value == undefined) break;
-    ASSERT(!heap->InNewSpace(value));
-    elements->set(i, value, SKIP_WRITE_BARRIER);
+    elements->set(i, value, mode);
   }
   if (i < length) {
     ASSERT(Smi::FromInt(0) == 0);
@@ -6603,6 +6669,9 @@
   // This assumption is used by the slice encoding in one or two smis.
   ASSERT(Smi::kMaxValue >= String::kMaxLength);
 
+  MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
+  if (maybe_result->IsFailure()) return maybe_result;
+
   int special_length = special->length();
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6830,7 +6899,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSArray, elements_array, args[0]);
-  RUNTIME_ASSERT(elements_array->HasFastElements());
+  RUNTIME_ASSERT(elements_array->HasFastElements() ||
+                 elements_array->HasFastSmiOnlyElements());
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
   CONVERT_CHECKED(String, separator, args[2]);
   // elements_array is fast-mode JSArray of alternating positions
@@ -7434,7 +7504,7 @@
 }
 
 
-static int MakeDay(int year, int month, int day) {
+static int MakeDay(int year, int month) {
   static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
                                        181, 212, 243, 273, 304, 334};
   static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
@@ -7471,23 +7541,22 @@
                       year1 / 400 -
                       base_day;
 
-  if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
-    return day_from_year + day_from_month[month] + day - 1;
+  if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
+    return day_from_year + day_from_month[month];
   }
 
-  return day_from_year + day_from_month_leap[month] + day - 1;
+  return day_from_year + day_from_month_leap[month];
 }
 
 
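MakeDay now takes only (year, month) and returns the day number of the first of that month; callers add day - 1 themselves, which is exactly what the adjusted ASSERTs in the later hunks check. The month arithmetic is two cumulative tables selected by the leap-year rule (divisible by 4, except century years unless divisible by 400). A self-contained version of just that part, using the same tables (the leap table's tail is completed from the month lengths):

#include <cassert>

static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
                                    181, 212, 243, 273, 304, 334};
static const int kDayFromMonthLeap[] = {0, 31, 60, 91, 121, 152,
                                        182, 213, 244, 274, 305, 335};

bool IsLeap(int year) {
  return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}

// Days from January 1 of `year` to the first of `month` (0-based month).
int DayInYearOfMonthStart(int year, int month) {
  return IsLeap(year) ? kDayFromMonthLeap[month] : kDayFromMonth[month];
}

int main() {
  assert(DayInYearOfMonthStart(2001, 2) == 59);  // Mar 1: 31 + 28.
  assert(DayInYearOfMonthStart(2000, 2) == 60);  // Leap year: 31 + 29.
}

The days-since-epoch part of MakeDay (the year1 / 400 - base_day computation above) is unchanged apart from dropping the per-day term.
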
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 2);
 
   CONVERT_SMI_ARG_CHECKED(year, 0);
   CONVERT_SMI_ARG_CHECKED(month, 1);
-  CONVERT_SMI_ARG_CHECKED(date, 2);
 
-  return Smi::FromInt(MakeDay(year, month, date));
+  return Smi::FromInt(MakeDay(year, month));
 }
 
 
@@ -7716,7 +7785,7 @@
   month = kMonthInYear[date];
   day = kDayInYear[date];
 
-  ASSERT(MakeDay(year, month, day) == save_date);
+  ASSERT(MakeDay(year, month) + day - 1 == save_date);
 }
 
 
@@ -7730,7 +7799,7 @@
   year = 400 * (date / kDaysIn400Years) - kYearsOffset;
   date %= kDaysIn400Years;
 
-  ASSERT(MakeDay(year, 0, 1) + date == save_date);
+  ASSERT(MakeDay(year, 0) + date == save_date);
 
   date--;
   int yd1 = date / kDaysIn100Years;
@@ -7753,8 +7822,8 @@
   ASSERT(is_leap || (date >= 0));
   ASSERT((date < 365) || (is_leap && (date < 366)));
   ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
-  ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
-  ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
+  ASSERT(is_leap || ((MakeDay(year, 0) + date) == save_date));
+  ASSERT(!is_leap || ((MakeDay(year, 0) + date + 1) == save_date));
 
   if (is_leap) {
     day = kDayInYear[2*365 + 1 + date];
@@ -7764,7 +7833,7 @@
     month = kMonthInYear[date];
   }
 
-  ASSERT(MakeDay(year, month, day) == save_date);
+  ASSERT(MakeDay(year, month) + day - 1 == save_date);
 }
 
 
@@ -7788,11 +7857,13 @@
   int year, month, day;
   DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
 
-  RUNTIME_ASSERT(res_array->elements()->map() ==
-                 isolate->heap()->fixed_array_map());
-  FixedArray* elms = FixedArray::cast(res_array->elements());
-  RUNTIME_ASSERT(elms->length() == 3);
+  FixedArrayBase* elms_base = FixedArrayBase::cast(res_array->elements());
+  RUNTIME_ASSERT(elms_base->length() == 3);
+  RUNTIME_ASSERT(res_array->HasFastTypeElements());
 
+  MaybeObject* maybe = res_array->EnsureWritableFastElements();
+  if (maybe->IsFailure()) return maybe;
+  FixedArray* elms = FixedArray::cast(res_array->elements());
   elms->set(0, Smi::FromInt(year));
   elms->set(1, Smi::FromInt(month));
   elms->set(2, Smi::FromInt(day));
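
EnsureWritableFastElements is needed because fast-elements backing stores can be copy-on-write: several arrays may share one FixedArray, so the in-place year/month/day stores must first un-share it. The same pattern with shared_ptr standing in for the COW backing store (invented types; V8 detects sharing via a copy-on-write map rather than a reference count):

#include <cassert>
#include <memory>
#include <vector>

using Elements = std::vector<int>;

struct Array {
  std::shared_ptr<Elements> elements;  // Possibly shared (copy-on-write).
};

// Clone the backing store if anyone else still references it.
void EnsureWritableElements(Array* a) {
  if (a->elements.use_count() > 1) {
    a->elements = std::make_shared<Elements>(*a->elements);
  }
}

int main() {
  Array a{std::make_shared<Elements>(Elements{0, 0, 0})};
  Array b{a.elements};      // b shares a's backing store.
  EnsureWritableElements(&a);
  (*a.elements)[0] = 2011;  // In-place store no longer affects b.
  assert((*b.elements)[0] == 0);
}
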
@@ -7846,14 +7917,14 @@
         --index;
       }
 
-      ScopeInfo<> scope_info(callee->shared()->scope_info());
+      Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
       while (index >= 0) {
         // Detect duplicate names to the right in the parameter list.
-        Handle<String> name = scope_info.parameter_name(index);
-        int context_slot_count = scope_info.number_of_context_slots();
+        Handle<String> name(scope_info->ParameterName(index));
+        int context_local_count = scope_info->ContextLocalCount();
         bool duplicate = false;
         for (int j = index + 1; j < parameter_count; ++j) {
-          if (scope_info.parameter_name(j).is_identical_to(name)) {
+          if (scope_info->ParameterName(j) == *name) {
             duplicate = true;
             break;
           }
@@ -7868,17 +7939,16 @@
           // The context index goes in the parameter map with a hole in the
           // arguments array.
           int context_index = -1;
-          for (int j = Context::MIN_CONTEXT_SLOTS;
-               j < context_slot_count;
-               ++j) {
-            if (scope_info.context_slot_name(j).is_identical_to(name)) {
+          for (int j = 0; j < context_local_count; ++j) {
+            if (scope_info->ContextLocalName(j) == *name) {
               context_index = j;
               break;
             }
           }
           ASSERT(context_index >= 0);
           arguments->set_the_hole(index);
-          parameter_map->set(index + 2, Smi::FromInt(context_index));
+          parameter_map->set(index + 2, Smi::FromInt(
+              Context::MIN_CONTEXT_SLOTS + context_index));
         }
 
         --index;
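
The fix above offsets stored indices by Context::MIN_CONTEXT_SLOTS because the new ScopeInfo numbers context locals from zero, while a context reserves its first slots for bookkeeping. The aliasing scheme itself: an aliased parameter gets the hole in the arguments backing store plus a context-slot index in the parameter map, and loads consult the map first. A toy model (invented types; the real map lives inside the elements array, and the bookkeeping slot count here is an assumed value):

#include <cassert>
#include <optional>
#include <vector>

constexpr int kMinContextSlots = 4;  // Assumed bookkeeping slot count.

struct MappedArguments {
  std::vector<std::optional<int>> backing;  // nullopt plays the hole.
  std::vector<std::optional<int>> map;      // Parameter -> context slot.
};

int Load(const MappedArguments& args,
         const std::vector<int>& context, int index) {
  if (args.map[index]) return context[*args.map[index]];  // Aliased.
  return *args.backing[index];                            // Unaliased.
}

int main() {
  // Parameter 0 is context-allocated as context local 0.
  std::vector<int> context(kMinContextSlots + 1, 0);
  context[kMinContextSlots + 0] = 42;
  MappedArguments args{{std::nullopt}, {kMinContextSlots + 0}};
  assert(Load(args, context, 0) == 42);
}
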
@@ -7952,8 +8022,12 @@
 }
 
 
-static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
-                                                        int* total_argc) {
+// Find the arguments of the JavaScript function invocation that called
+// into C++ code. Collect these in a newly allocated array of handles (possibly
+// prefixed by a number of empty handles).
+static SmartArrayPointer<Handle<Object> > GetCallerArguments(
+    int prefix_argc,
+    int* total_argc) {
   // Find frame containing arguments passed to the caller.
   JavaScriptFrameIterator it;
   JavaScriptFrame* frame = it.frame();
@@ -7968,11 +8042,12 @@
                                             inlined_frame_index,
                                             &args_slots);
 
-    *total_argc = bound_argc + args_count;
-    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    *total_argc = prefix_argc + args_count;
+    SmartArrayPointer<Handle<Object> > param_data(
+        NewArray<Handle<Object> >(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = args_slots[i].GetValue();
-      param_data[bound_argc + i] = val.location();
+      param_data[prefix_argc + i] = val;
     }
     return param_data;
   } else {
@@ -7980,48 +8055,131 @@
     frame = it.frame();
     int args_count = frame->ComputeParametersCount();
 
-    *total_argc = bound_argc + args_count;
-    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    *total_argc = prefix_argc + args_count;
+    SmartArrayPointer<Handle<Object> > param_data(
+        NewArray<Handle<Object> >(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = Handle<Object>(frame->GetParameter(i));
-      param_data[bound_argc + i] = val.location();
+      param_data[prefix_argc + i] = val;
     }
     return param_data;
   }
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 4);
+  CONVERT_ARG_CHECKED(JSFunction, bound_function, 0);
+  RUNTIME_ASSERT(args[3]->IsNumber());
+  Handle<Object> bindee = args.at<Object>(1);
+
+  // TODO(lrn): Create bound function in C++ code from premade shared info.
+  bound_function->shared()->set_bound(true);
+  // Get all arguments of calling function (Function.prototype.bind).
+  int argc = 0;
+  SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc);
+  // Don't count the this-arg.
+  if (argc > 0) {
+    ASSERT(*arguments[0] == args[2]);
+    argc--;
+  } else {
+    ASSERT(args[2]->IsUndefined());
+  }
+  // Initialize array of bindings (function, this, and any existing arguments
+  // if the function was already bound).
+  Handle<FixedArray> new_bindings;
+  int i;
+  if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
+    Handle<FixedArray> old_bindings(
+        JSFunction::cast(*bindee)->function_bindings());
+    new_bindings =
+        isolate->factory()->NewFixedArray(old_bindings->length() + argc);
+    bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex));
+    i = 0;
+    for (int n = old_bindings->length(); i < n; i++) {
+      new_bindings->set(i, old_bindings->get(i));
+    }
+  } else {
+    int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
+    new_bindings = isolate->factory()->NewFixedArray(array_size);
+    new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
+    new_bindings->set(JSFunction::kBoundThisIndex, args[2]);
+    i = 2;
+  }
+  // Copy arguments, skipping the first, which is "this_arg".
+  for (int j = 0; j < argc; j++, i++) {
+    new_bindings->set(i, *arguments[j + 1]);
+  }
+  new_bindings->set_map(isolate->heap()->fixed_cow_array_map());
+  bound_function->set_function_bindings(*new_bindings);
+
+  // Update length.
+  Handle<String> length_symbol = isolate->factory()->length_symbol();
+  Handle<Object> new_length(args.at<Object>(3));
+  PropertyAttributes attr =
+      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+  ForceSetProperty(bound_function, length_symbol, new_length, attr);
+  return *bound_function;
+}
+
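
Function.prototype.bind now keeps everything in one copy-on-write FixedArray on the bound function: slot 0 the target, slot 1 the bound this, remaining slots the bound arguments. Binding an already-bound function copies the old bindings and appends, so the stored target is never itself bound and the original bound this wins. A flattened model of that layout (plain vectors and an invented Value type):

#include <cassert>
#include <string>
#include <vector>

using Value = std::string;  // Stand-in for a tagged JS value.

// Layout: [0] target, [1] bound this, [2..] bound arguments.
constexpr int kBoundFunctionIndex = 0;
constexpr int kBoundThisIndex = 1;
constexpr int kBoundArgumentsStartIndex = 2;

std::vector<Value> Bind(const std::vector<Value>* old_bindings,
                        Value target, Value this_arg,
                        const std::vector<Value>& args) {
  std::vector<Value> bindings;
  if (old_bindings != nullptr) {
    bindings = *old_bindings;  // Re-bind: keep target, this, old args.
  } else {
    bindings = {target, this_arg};
  }
  bindings.insert(bindings.end(), args.begin(), args.end());
  return bindings;
}

int main() {
  auto b1 = Bind(nullptr, "f", "o", {"a"});          // f.bind(o, "a")
  auto b2 = Bind(&b1, "ignored", "ignored", {"b"});  // ...bind(p, "b")
  assert(b2[kBoundFunctionIndex] == "f");
  assert(b2[kBoundThisIndex] == "o");
  assert(b2.size() == kBoundArgumentsStartIndex + 2);  // args "a", "b"
}
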
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
+  HandleScope handles(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
+  if (callable->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
+    if (function->shared()->bound()) {
+      Handle<FixedArray> bindings(function->function_bindings());
+      ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
+      return *isolate->factory()->NewJSArrayWithElements(bindings);
+    }
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  ASSERT(args.length() == 1);
   // First argument is a function to use as a constructor.
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  RUNTIME_ASSERT(function->shared()->bound());
 
-  // Second argument is either null or an array of bound arguments.
-  Handle<FixedArray> bound_args;
-  int bound_argc = 0;
-  if (!args[1]->IsNull()) {
-    CONVERT_ARG_CHECKED(JSArray, params, 1);
-    RUNTIME_ASSERT(params->HasFastElements());
-    bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
-    bound_argc = Smi::cast(params->length())->value();
-  }
+  // The argument is a bound function. Extract its bound arguments
+  // and callable.
+  Handle<FixedArray> bound_args =
+      Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
+  int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
+  Handle<Object> bound_function(
+      JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)));
+  ASSERT(!bound_function->IsJSFunction() ||
+         !Handle<JSFunction>::cast(bound_function)->shared()->bound());
 
   int total_argc = 0;
-  SmartArrayPointer<Object**> param_data =
-      GetNonBoundArguments(bound_argc, &total_argc);
+  SmartArrayPointer<Handle<Object> > param_data =
+      GetCallerArguments(bound_argc, &total_argc);
   for (int i = 0; i < bound_argc; i++) {
-    Handle<Object> val = Handle<Object>(bound_args->get(i));
-    param_data[i] = val.location();
+    param_data[i] = Handle<Object>(bound_args->get(
+        JSFunction::kBoundArgumentsStartIndex + i));
   }
 
+  if (!bound_function->IsJSFunction()) {
+    bool exception_thrown;
+    bound_function = Execution::TryGetConstructorDelegate(bound_function,
+                                                          &exception_thrown);
+    if (exception_thrown) return Failure::Exception();
+  }
+  ASSERT(bound_function->IsJSFunction());
+
   bool exception = false;
   Handle<Object> result =
-      Execution::New(function, total_argc, *param_data, &exception);
+      Execution::New(Handle<JSFunction>::cast(bound_function),
+                     total_argc, *param_data, &exception);
   if (exception) {
-      return Failure::Exception();
+    return Failure::Exception();
   }
-
   ASSERT(!result.is_null());
   return *result;
 }
@@ -8034,12 +8192,9 @@
     prototype = Handle<Object>(function->instance_prototype(), isolate);
   }
   if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
-    ConstructStubCompiler compiler;
-    MaybeObject* code = compiler.CompileConstructStub(*function);
-    if (!code->IsFailure()) {
-      function->shared()->set_construct_stub(
-          Code::cast(code->ToObjectUnchecked()));
-    }
+    ConstructStubCompiler compiler(isolate);
+    Handle<Code> code = compiler.CompileConstructStub(function);
+    function->shared()->set_construct_stub(*code);
   }
 }
 
@@ -8098,9 +8253,11 @@
   // available. We cannot use EnsureCompiled because that forces a
   // compilation through the shared function info which makes it
   // impossible for us to optimize.
-  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
-  if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
+  if (!function->is_compiled()) {
+    JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  }
 
+  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   if (!function->has_initial_map() &&
       shared->IsInobjectSlackTrackingInProgress()) {
     // The tracking is already in progress for another function. We can only
@@ -8151,7 +8308,7 @@
 
   // Compile the target function.
   ASSERT(!function->is_compiled());
-  if (!CompileLazy(function, KEEP_EXCEPTION)) {
+  if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -8188,7 +8345,9 @@
     function->ReplaceCode(function->shared()->code());
     return function->code();
   }
-  if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) {
+  if (JSFunction::CompileOptimized(function,
+                                   AstNode::kNoNumber,
+                                   CLEAR_EXCEPTION)) {
     return function->code();
   }
   if (FLAG_trace_opt) {
@@ -8201,6 +8360,31 @@
 }
 
 
+class ActivationsFinder : public ThreadVisitor {
+ public:
+  explicit ActivationsFinder(JSFunction* function)
+      : function_(function), has_activations_(false) {}
+
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    if (has_activations_) return;
+
+    for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+      JavaScriptFrame* frame = it.frame();
+      if (frame->is_optimized() && frame->function() == function_) {
+        has_activations_ = true;
+        return;
+      }
+    }
+  }
+
+  bool has_activations() { return has_activations_; }
+
+ private:
+  JSFunction* function_;
+  bool has_activations_;
+};
+
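
ActivationsFinder closes a gap in Runtime_NotifyDeoptimized below: scanning the current thread's stack is not enough, because an archived (swapped-out) thread may still hold an optimized frame for the function, and its code must not be discarded while such an activation exists. Sketched generically, IterateArchivedThreads just replays every saved thread state through a visitor (invented types mirroring the protocol above):

#include <vector>

struct ThreadState { /* saved stack, handles, ... */ };

class Visitor {
 public:
  virtual ~Visitor() = default;
  virtual void VisitThread(ThreadState* state) = 0;
};

class ThreadManager {
 public:
  void Archive(ThreadState* s) { archived_.push_back(s); }
  // Counterpart of IterateArchivedThreads: every swapped-out thread is
  // offered to the visitor, so no stack escapes the scan.
  void IterateArchivedThreads(Visitor* v) {
    for (ThreadState* s : archived_) v->VisitThread(s);
  }

 private:
  std::vector<ThreadState*> archived_;
};

struct CountingVisitor : Visitor {
  int visited = 0;
  void VisitThread(ThreadState*) override { ++visited; }
};

int main() {
  ThreadManager tm;
  ThreadState t1, t2;
  tm.Archive(&t1);
  tm.Archive(&t2);
  CountingVisitor v;
  tm.IterateArchivedThreads(&v);
  return v.visited == 2 ? 0 : 1;
}
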
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -8247,17 +8431,24 @@
     return isolate->heap()->undefined_value();
   }
 
-  // Count the number of optimized activations of the function.
-  int activations = 0;
+  // Find other optimized activations of the function.
+  bool has_other_activations = false;
   while (!it.done()) {
     JavaScriptFrame* frame = it.frame();
     if (frame->is_optimized() && frame->function() == *function) {
-      activations++;
+      has_other_activations = true;
+      break;
     }
     it.Advance();
   }
 
-  if (activations == 0) {
+  if (!has_other_activations) {
+    ActivationsFinder activations_finder(*function);
+    isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
+    has_other_activations = activations_finder.has_activations();
+  }
+
+  if (!has_other_activations) {
     if (FLAG_trace_deopt) {
       PrintF("[removing optimized code for: ");
       function->PrintName();
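
Note: with the ActivationsFinder above, optimized code is discarded only
once no activation of the function remains on the current stack or on any
archived thread. A single-thread sketch in the style of V8's mjsunit tests
(assumes d8 with --allow-natives-syntax):

        function f() {
          %DeoptimizeFunction(f);  // f is still active here, so its
          return 1;                // optimized code cannot be discarded yet
        }
        f();
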
@@ -8312,6 +8503,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
+  // The least significant bit (after untagging) indicates whether the
+  // function is currently optimized, regardless of reason.
   if (!V8::UseCrankshaft()) {
     return Smi::FromInt(4);  // 4 == "never".
   }
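
Note: a sketch of how a test reads this status (assumes d8 with
--allow-natives-syntax; 4 means Crankshaft is unavailable, as above):

        function f(a, b) { return a + b; }
        f(1, 2);
        f(3, 4);                         // warm up
        %OptimizeFunctionOnNextCall(f);
        f(5, 6);                         // compiles optimized code
        var status = %GetOptimizationStatus(f);
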
@@ -8395,7 +8588,7 @@
     // Try to compile the optimized code.  A true return value from
     // CompileOptimized means that compilation succeeded, not necessarily
     // that optimization succeeded.
-    if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
+    if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
         function->IsOptimized()) {
       DeoptimizationInputData* data = DeoptimizationInputData::cast(
           function->code()->deoptimization_data());
@@ -8452,6 +8645,42 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() >= 2);
+  CONVERT_CHECKED(JSReceiver, fun, args[args.length() - 1]);
+  Object* receiver = args[0];
+  int argc = args.length() - 2;
+
+  // If there are too many arguments, allocate argv via malloc.
+  const int argv_small_size = 10;
+  Handle<Object> argv_small_buffer[argv_small_size];
+  SmartArrayPointer<Handle<Object> > argv_large_buffer;
+  Handle<Object>* argv = argv_small_buffer;
+  if (argc > argv_small_size) {
+    argv = new Handle<Object>[argc];
+    if (argv == NULL) return isolate->StackOverflow();
+    argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+  }
+
+  for (int i = 0; i < argc; ++i) {
+     MaybeObject* maybe = args[1 + i];
+     Object* object;
+     if (!maybe->To<Object>(&object)) return maybe;
+     argv[i] = Handle<Object>(object);
+  }
+
+  bool threw;
+  Handle<JSReceiver> hfun(fun);
+  Handle<Object> hreceiver(receiver);
+  Handle<Object> result =
+      Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+
+  if (threw) return Failure::Exception();
+  return *result;
+}
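
Note: Runtime_Call takes the receiver in args[0] and the callee in the last
slot, so a natives-syntax sketch of a call through it looks like this
(assumes d8 with --allow-natives-syntax):

        function add(a, b) { return this.base + a + b; }
        var r = %Call({ base: 10 }, 1, 2, add);  // receiver, args, callee: 13
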
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 5);
@@ -8484,11 +8713,11 @@
      argv[i] = Handle<Object>(object);
   }
 
-  bool threw = false;
+  bool threw;
   Handle<JSReceiver> hfun(fun);
   Handle<Object> hreceiver(receiver);
-  Handle<Object> result = Execution::Call(
-      hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true);
+  Handle<Object> result =
+      Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
 
   if (threw) return Failure::Exception();
   return *result;
@@ -8516,7 +8745,7 @@
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSFunction, function, args[0]);
-  int length = function->shared()->scope_info()->NumberOfContextSlots();
+  int length = function->shared()->scope_info()->ContextLength();
   Object* result;
   { MaybeObject* maybe_result =
         isolate->heap()->AllocateFunctionContext(length, function);
@@ -8602,7 +8831,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  SerializedScopeInfo* scope_info = SerializedScopeInfo::cast(args[0]);
+  ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
   JSFunction* function;
   if (args[1]->IsSmi()) {
     // A smi sentinel indicates a context nested inside global code rather
@@ -8651,18 +8880,10 @@
   }
 
   // The slot was found in a JSObject, either a context extension object,
-  // the global object, or an arguments object.  Try to delete it
-  // (respecting DONT_DELETE).  For consistency with V8's usual behavior,
-  // which allows deleting all parameters in functions that mention
-  // 'arguments', we do this even for the case of slots found on an
-  // arguments object.  The slot was found on an arguments object if the
-  // index is non-negative.
+  // the global object, or the subject of a with.  Try to delete it
+  // (respecting DONT_DELETE).
   Handle<JSObject> object = Handle<JSObject>::cast(holder);
-  if (index >= 0) {
-    return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
-  } else {
-    return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
-  }
+  return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
 }
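
Note: the simplified deletion corresponds to this script behavior; an
unqualified delete that resolves to a property of a with-subject or context
extension object removes that property (DONT_DELETE respected):

        var o = { x: 1 };
        with (o) { delete x; }  // resolves to o.x and removes it
        // "x" in o  ->  false
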
 
 
@@ -8747,52 +8968,53 @@
                                           &attributes,
                                           &binding_flags);
 
-  // If the index is non-negative, the slot has been found in a local
-  // variable or a parameter. Read it from the context object or the
-  // arguments object.
+  // If the index is non-negative, the slot has been found in a context.
   if (index >= 0) {
-    // If the "property" we were looking for is a local variable or an
-    // argument in a context, the receiver is the global object; see
-    // ECMA-262, 3rd., 10.1.6 and 10.2.3.
+    ASSERT(holder->IsContext());
+    // If the "property" we were looking for is a local variable, the
+    // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
     //
-    // Use the hole as the receiver to signal that the receiver is
-    // implicit and that the global receiver should be used.
+    // Use the hole as the receiver to signal that the receiver is implicit
+    // and that the global receiver should be used (as distinguished from an
+    // explicit receiver that happens to be a global object).
     Handle<Object> receiver = isolate->factory()->the_hole_value();
-    MaybeObject* value = (holder->IsContext())
-        ? Context::cast(*holder)->get(index)
-        : JSObject::cast(*holder)->GetElement(index);
+    Object* value = Context::cast(*holder)->get(index);
     // Check for uninitialized bindings.
-    if (holder->IsContext() &&
-        binding_flags == MUTABLE_CHECK_INITIALIZED &&
-        value->IsTheHole()) {
-      Handle<Object> reference_error =
-          isolate->factory()->NewReferenceError("not_defined",
-                                                HandleVector(&name, 1));
-      return MakePair(isolate->Throw(*reference_error), NULL);
-    } else {
-      return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+    switch (binding_flags) {
+      case MUTABLE_CHECK_INITIALIZED:
+      case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
+        if (value->IsTheHole()) {
+          Handle<Object> reference_error =
+              isolate->factory()->NewReferenceError("not_defined",
+                                                    HandleVector(&name, 1));
+          return MakePair(isolate->Throw(*reference_error), NULL);
+        }
+        // FALLTHROUGH
+      case MUTABLE_IS_INITIALIZED:
+      case IMMUTABLE_IS_INITIALIZED:
+      case IMMUTABLE_IS_INITIALIZED_HARMONY:
+        ASSERT(!value->IsTheHole());
+        return MakePair(value, *receiver);
+      case IMMUTABLE_CHECK_INITIALIZED:
+        return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
+      case MISSING_BINDING:
+        UNREACHABLE();
+        return MakePair(NULL, NULL);
     }
   }
 
-  // If the holder is found, we read the property from it.
-  if (!holder.is_null() && holder->IsJSObject()) {
-    ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
-    JSObject* object = JSObject::cast(*holder);
-    Object* receiver;
-    if (object->IsGlobalObject()) {
-      receiver = GlobalObject::cast(object)->global_receiver();
-    } else if (context->is_exception_holder(*holder)) {
-      // Use the hole as the receiver to signal that the receiver is
-      // implicit and that the global receiver should be used.
-      receiver = isolate->heap()->the_hole_value();
-    } else {
-      receiver = ComputeReceiverForNonGlobal(isolate, object);
-    }
-
+  // Otherwise, if the slot was found the holder is a context extension
+  // object, subject of a with, or a global object.  We read the named
+  // property from it.
+  if (!holder.is_null()) {
+    Handle<JSObject> object = Handle<JSObject>::cast(holder);
+    ASSERT(object->HasProperty(*name));
     // GetProperty below can cause GC.
-    Handle<Object> receiver_handle(receiver);
+    Handle<Object> receiver_handle(object->IsGlobalObject()
+        ? GlobalObject::cast(*object)->global_receiver()
+        : ComputeReceiverForNonGlobal(isolate, *object));
 
-    // No need to unhole the value here. This is taken care of by the
+    // No need to unhole the value here.  This is taken care of by the
     // GetProperty function.
     MaybeObject* value = object->GetProperty(*name);
     return MakePair(value, *receiver_handle);
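
Note: the IMMUTABLE_CHECK_INITIALIZED branch (unholing) is this era's
classic-mode const; a sketch of the semantics it preserves:

        function f() {
          var before = c;      // c's slot still holds the hole: undefined
          const c = 7;
          return [before, c];  // [undefined, 7]
        }
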
@@ -8829,10 +9051,9 @@
   Handle<Object> value(args[0], isolate);
   CONVERT_ARG_CHECKED(Context, context, 1);
   CONVERT_ARG_CHECKED(String, name, 2);
-  CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
-  RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
-                 strict_unchecked == kNonStrictMode);
-  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
+  CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+  StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
+      ? kNonStrictMode : kStrictMode;
 
   int index;
   PropertyAttributes attributes;
@@ -8845,45 +9066,37 @@
                                           &binding_flags);
 
   if (index >= 0) {
-    if (holder->IsContext()) {
-      Handle<Context> context = Handle<Context>::cast(holder);
-      if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
-          context->get(index)->IsTheHole()) {
-        Handle<Object> error =
-            isolate->factory()->NewReferenceError("not_defined",
-                                                  HandleVector(&name, 1));
-        return isolate->Throw(*error);
-      }
-      // Ignore if read_only variable.
-      if ((attributes & READ_ONLY) == 0) {
-        // Context is a fixed array and set cannot fail.
-        context->set(index, *value);
-      } else if (strict_mode == kStrictMode) {
-        // Setting read only property in strict mode.
-        Handle<Object> error =
-            isolate->factory()->NewTypeError("strict_cannot_assign",
-                                             HandleVector(&name, 1));
-        return isolate->Throw(*error);
-      }
-    } else {
-      ASSERT((attributes & READ_ONLY) == 0);
-      Handle<Object> result =
-          SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
-      if (result.is_null()) {
-        ASSERT(isolate->has_pending_exception());
-        return Failure::Exception();
-      }
+    // The property was found in a context slot.
+    Handle<Context> context = Handle<Context>::cast(holder);
+    if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+        context->get(index)->IsTheHole()) {
+      Handle<Object> error =
+          isolate->factory()->NewReferenceError("not_defined",
+                                                HandleVector(&name, 1));
+      return isolate->Throw(*error);
+    }
+    // Ignore if read_only variable.
+    if ((attributes & READ_ONLY) == 0) {
+      // Context is a fixed array and set cannot fail.
+      context->set(index, *value);
+    } else if (strict_mode == kStrictMode) {
+      // Setting read only property in strict mode.
+      Handle<Object> error =
+          isolate->factory()->NewTypeError("strict_cannot_assign",
+                                           HandleVector(&name, 1));
+      return isolate->Throw(*error);
     }
     return *value;
   }
 
-  // Slow case: The property is not in a FixedArray context.
-  // It is either in an JSObject extension context or it was not found.
-  Handle<JSObject> context_ext;
+  // Slow case: The property is not in a context slot.  It is either in a
+  // context extension object, a property of the subject of a with, or a
+  // property of the global object.
+  Handle<JSObject> object;
 
   if (!holder.is_null()) {
-    // The property exists in the extension context.
-    context_ext = Handle<JSObject>::cast(holder);
+    // The property exists on the holder.
+    object = Handle<JSObject>::cast(holder);
   } else {
     // The property was not found.
     ASSERT(attributes == ABSENT);
@@ -8891,22 +9104,21 @@
     if (strict_mode == kStrictMode) {
       // Throw in strict mode (assignment to undefined variable).
       Handle<Object> error =
-        isolate->factory()->NewReferenceError(
-            "not_defined", HandleVector(&name, 1));
+          isolate->factory()->NewReferenceError(
+              "not_defined", HandleVector(&name, 1));
       return isolate->Throw(*error);
     }
-    // In non-strict mode, the property is stored in the global context.
+    // In non-strict mode, the property is added to the global object.
     attributes = NONE;
-    context_ext = Handle<JSObject>(isolate->context()->global());
+    object = Handle<JSObject>(isolate->context()->global());
   }
 
-  // Set the property, but ignore if read_only variable on the context
-  // extension object itself.
+  // Set the property if it's not read only or doesn't yet exist.
   if ((attributes & READ_ONLY) == 0 ||
-      (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
+      (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
     RETURN_IF_EMPTY_HANDLE(
         isolate,
-        SetProperty(context_ext, name, value, NONE, strict_mode));
+        SetProperty(object, name, value, NONE, strict_mode));
   } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
     // Setting read only property in strict mode.
     Handle<Object> error =
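
Note: both strict-mode error paths in this function are observable from
script; the "not_defined" throw above covers assignment to an undeclared
variable, and the "strict_cannot_assign" throw here covers read-only slots:

        (function() {
          "use strict";
          undeclared = 1;  // ReferenceError via the "not_defined" path
        })();
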
@@ -8965,42 +9177,6 @@
 }
 
 
-// NOTE: These PrintXXX functions are defined for all builds (not just
-// DEBUG builds) because we may want to be able to trace function
-// calls in all modes.
-static void PrintString(String* str) {
-  // not uncommon to have empty strings
-  if (str->length() > 0) {
-    SmartArrayPointer<char> s =
-        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    PrintF("%s", *s);
-  }
-}
-
-
-static void PrintObject(Object* obj) {
-  if (obj->IsSmi()) {
-    PrintF("%d", Smi::cast(obj)->value());
-  } else if (obj->IsString() || obj->IsSymbol()) {
-    PrintString(String::cast(obj));
-  } else if (obj->IsNumber()) {
-    PrintF("%g", obj->Number());
-  } else if (obj->IsFailure()) {
-    PrintF("<failure>");
-  } else if (obj->IsUndefined()) {
-    PrintF("<undefined>");
-  } else if (obj->IsNull()) {
-    PrintF("<null>");
-  } else if (obj->IsTrue()) {
-    PrintF("<true>");
-  } else if (obj->IsFalse()) {
-    PrintF("<false>");
-  } else {
-    PrintF("%p", reinterpret_cast<void*>(obj));
-  }
-}
-
-
 static int StackSize() {
   int n = 0;
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
@@ -9019,38 +9195,33 @@
   }
 
   if (result == NULL) {
-    // constructor calls
-    JavaScriptFrameIterator it;
-    JavaScriptFrame* frame = it.frame();
-    if (frame->IsConstructor()) PrintF("new ");
-    // function name
-    Object* fun = frame->function();
-    if (fun->IsJSFunction()) {
-      PrintObject(JSFunction::cast(fun)->shared()->name());
-    } else {
-      PrintObject(fun);
-    }
-    // function arguments
-    // (we are intentionally only printing the actually
-    // supplied parameters, not all parameters required)
-    PrintF("(this=");
-    PrintObject(frame->receiver());
-    const int length = frame->ComputeParametersCount();
-    for (int i = 0; i < length; i++) {
-      PrintF(", ");
-      PrintObject(frame->GetParameter(i));
-    }
-    PrintF(") {\n");
-
+    JavaScriptFrame::PrintTop(stdout, true, false);
+    PrintF(" {\n");
   } else {
     // function result
     PrintF("} -> ");
-    PrintObject(result);
+    result->ShortPrint();
     PrintF("\n");
   }
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceElementsKindTransition) {
+  ASSERT(args.length() == 5);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_SMI_ARG_CHECKED(from_kind, 1);
+  CONVERT_ARG_CHECKED(FixedArrayBase, from_elements, 2);
+  CONVERT_SMI_ARG_CHECKED(to_kind, 3);
+  CONVERT_ARG_CHECKED(FixedArrayBase, to_elements, 4);
+  NoHandleAllocation ha;
+  PrintF("*");
+  obj->PrintElementsTransition(stdout,
+      static_cast<ElementsKind>(from_kind), *from_elements,
+      static_cast<ElementsKind>(to_kind), *to_elements);
+  return isolate->heap()->undefined_value();
+}
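
Note: the stores this hook reports, sketched in script (the tracing itself
is flag-gated, presumably --trace-elements-transitions):

        var a = [1, 2, 3];  // starts with FAST_SMI_ONLY_ELEMENTS
        a[0] = 1.5;         // transitions to FAST_DOUBLE_ELEMENTS
        a[1] = "s";         // transitions to FAST_ELEMENTS
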
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
   ASSERT(args.length() == 0);
   NoHandleAllocation ha;
@@ -9126,6 +9297,10 @@
   FlattenString(str);
 
   CONVERT_ARG_CHECKED(JSArray, output, 1);
+
+  MaybeObject* maybe_result_array =
+      output->EnsureCanContainNonSmiElements();
+  if (maybe_result_array->IsFailure()) return maybe_result_array;
   RUNTIME_ASSERT(output->HasFastElements());
 
   AssertNoAllocation no_allocation;
@@ -9245,10 +9420,8 @@
   }
 
   // Compile source string in the global context.
-  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
-                                                            context,
-                                                            true,
-                                                            kNonStrictMode);
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
+      source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -9261,7 +9434,8 @@
 static ObjectPair CompileGlobalEval(Isolate* isolate,
                                     Handle<String> source,
                                     Handle<Object> receiver,
-                                    StrictModeFlag strict_mode) {
+                                    LanguageMode language_mode,
+                                    int scope_position) {
   Handle<Context> context = Handle<Context>(isolate->context());
   Handle<Context> global_context = Handle<Context>(context->global_context());
 
@@ -9279,7 +9453,8 @@
       source,
       Handle<Context>(isolate->context()),
       context->IsGlobalContext(),
-      strict_mode);
+      language_mode,
+      scope_position);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -9289,91 +9464,28 @@
 
 
 RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
-  ASSERT(args.length() == 4);
+  ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
   Handle<Object> callee = args.at<Object>(0);
-  Handle<Object> receiver;  // Will be overwritten.
 
-  // Compute the calling context.
-  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
-#ifdef DEBUG
-  // Make sure Isolate::context() agrees with the old code that traversed
-  // the stack frames to compute the context.
-  StackFrameLocator locator;
-  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
-  ASSERT(Context::cast(frame->context()) == *context);
-#endif
-
-  // Find where the 'eval' symbol is bound. It is unaliased only if
-  // it is bound in the global context.
-  int index = -1;
-  PropertyAttributes attributes = ABSENT;
-  BindingFlags binding_flags;
-  while (true) {
-    receiver = context->Lookup(isolate->factory()->eval_symbol(),
-                               FOLLOW_PROTOTYPE_CHAIN,
-                               &index,
-                               &attributes,
-                               &binding_flags);
-    // Stop search when eval is found or when the global context is
-    // reached.
-    if (attributes != ABSENT || context->IsGlobalContext()) break;
-    context = Handle<Context>(context->previous(), isolate);
-  }
-
-  // If eval could not be resolved, it has been deleted and we need to
-  // throw a reference error.
-  if (attributes == ABSENT) {
-    Handle<Object> name = isolate->factory()->eval_symbol();
-    Handle<Object> reference_error =
-        isolate->factory()->NewReferenceError("not_defined",
-                                              HandleVector(&name, 1));
-    return MakePair(isolate->Throw(*reference_error), NULL);
-  }
-
-  if (!context->IsGlobalContext()) {
-    // 'eval' is not bound in the global context. Just call the function
-    // with the given arguments. This is not necessarily the global eval.
-    if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
-      receiver = isolate->factory()->the_hole_value();
-    }
-    return MakePair(*callee, *receiver);
-  }
-
-  // 'eval' is bound in the global context, but it may have been overwritten.
-  // Compare it to the builtin 'GlobalEval' function to make sure.
+  // If "eval" didn't refer to the original GlobalEval, it's not a
+  // direct call to eval.
+  // (Even if it does, if the first argument isn't a string we just let
+  // execution default to an indirect call to eval, which also returns
+  // the first argument unchanged.)
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
     return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
-  ASSERT(args[3]->IsSmi());
+  CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+  ASSERT(args[4]->IsSmi());
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
                            args.at<Object>(2),
-                           static_cast<StrictModeFlag>(args.smi_at(3)));
-}
-
-
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
-  ASSERT(args.length() == 4);
-
-  HandleScope scope(isolate);
-  Handle<Object> callee = args.at<Object>(0);
-
-  // 'eval' is bound in the global context, but it may have been overwritten.
-  // Compare it to the builtin 'GlobalEval' function to make sure.
-  if (*callee != isolate->global_context()->global_eval_fun() ||
-      !args[1]->IsString()) {
-    return MakePair(*callee, isolate->heap()->the_hole_value());
-  }
-
-  ASSERT(args[3]->IsSmi());
-  return CompileGlobalEval(isolate,
-                           args.at<String>(1),
-                           args.at<Object>(2),
-                           static_cast<StrictModeFlag>(args.smi_at(3)));
+                           language_mode,
+                           args.smi_at(4));
 }
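
Note: the resolution rule reduces to the standard direct-versus-indirect
eval distinction:

        var x = "global";
        function f() {
          var x = "local";
          var geval = eval;
          return [eval("x"),    // direct call: local scope, "local"
                  geval("x")];  // indirect call: global scope, "global"
        }
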
 
 
@@ -9386,9 +9498,9 @@
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, func, 0);
 
-  Handle<Map> map = func->shared()->strict_mode()
-                        ? isolate->strict_mode_function_instance_map()
-                        : isolate->function_instance_map();
+  Handle<Map> map = func->shared()->is_classic_mode()
+      ? isolate->function_instance_map()
+      : isolate->strict_mode_function_instance_map();
 
   ASSERT(func->map()->instance_type() == map->instance_type());
   ASSERT(func->map()->instance_size() == map->instance_size());
@@ -9426,7 +9538,7 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, array, args[0]);
   CONVERT_CHECKED(JSObject, element, args[1]);
-  RUNTIME_ASSERT(array->HasFastElements());
+  RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
   int length = Smi::cast(array->length())->value();
   FixedArray* elements = FixedArray::cast(array->elements());
   for (int i = 0; i < length; i++) {
@@ -9485,9 +9597,8 @@
       // Fall-through to dictionary mode.
     }
     ASSERT(!fast_elements_);
-    Handle<SeededNumberDictionary> dict(
-        SeededNumberDictionary::cast(*storage_));
-    Handle<SeededNumberDictionary> result =
+    Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
+    Handle<NumberDictionary> result =
         isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
     if (!result.is_identical_to(dict)) {
       // Dictionary needed to grow.
@@ -9510,9 +9621,11 @@
         isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
     Handle<Map> map;
     if (fast_elements_) {
-      map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
+      map = isolate_->factory()->GetElementsTransitionMap(array,
+                                                          FAST_ELEMENTS);
     } else {
-      map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
+      map = isolate_->factory()->GetElementsTransitionMap(array,
+                                                          DICTIONARY_ELEMENTS);
     }
     array->set_map(*map);
     array->set_length(*length);
@@ -9525,15 +9638,14 @@
   void SetDictionaryMode(uint32_t index) {
     ASSERT(fast_elements_);
     Handle<FixedArray> current_storage(*storage_);
-    Handle<SeededNumberDictionary> slow_storage(
-        isolate_->factory()->NewSeededNumberDictionary(
-            current_storage->length()));
+    Handle<NumberDictionary> slow_storage(
+        isolate_->factory()->NewNumberDictionary(current_storage->length()));
     uint32_t current_length = static_cast<uint32_t>(current_storage->length());
     for (uint32_t i = 0; i < current_length; i++) {
       HandleScope loop_scope;
       Handle<Object> element(current_storage->get(i));
       if (!element->IsTheHole()) {
-        Handle<SeededNumberDictionary> new_storage =
+        Handle<NumberDictionary> new_storage =
           isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
         if (!new_storage.is_identical_to(slow_storage)) {
           slow_storage = loop_scope.CloseAndEscape(new_storage);
@@ -9568,6 +9680,7 @@
   uint32_t length = static_cast<uint32_t>(array->length()->Number());
   int element_count = 0;
   switch (array->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
@@ -9579,9 +9692,13 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS:
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
     case DICTIONARY_ELEMENTS: {
-      Handle<SeededNumberDictionary> dictionary(
-          SeededNumberDictionary::cast(array->elements()));
+      Handle<NumberDictionary> dictionary(
+          NumberDictionary::cast(array->elements()));
       int capacity = dictionary->Capacity();
       for (int i = 0; i < capacity; i++) {
         Handle<Object> key(dictionary->KeyAt(i));
@@ -9591,7 +9708,16 @@
       }
       break;
     }
-    default:
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
       // External arrays are always dense.
       return length;
   }
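
Note: FAST_DOUBLE_ELEMENTS is UNREACHABLE here because double arrays are
pessimistically transitioned to FAST_ELEMENTS before concatenation (see the
TODO(1810) hunk further down). In script terms:

        var s = [1, 2, 3];    // FAST_SMI_ONLY_ELEMENTS
        var d = [0.5, 1.5];   // FAST_DOUBLE_ELEMENTS
        var c = s.concat(d);  // d is transitioned to FAST_ELEMENTS first
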
@@ -9657,6 +9783,7 @@
                                   List<uint32_t>* indices) {
   ElementsKind kind = object->GetElementsKind();
   switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       Handle<FixedArray> elements(FixedArray::cast(object->elements()));
       uint32_t length = static_cast<uint32_t>(elements->length());
@@ -9668,9 +9795,13 @@
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
-      Handle<SeededNumberDictionary> dict(
-          SeededNumberDictionary::cast(object->elements()));
+      Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
       uint32_t capacity = dict->Capacity();
       for (uint32_t j = 0; j < capacity; j++) {
         HandleScope loop_scope;
@@ -9777,6 +9908,7 @@
                             ArrayConcatVisitor* visitor) {
   uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
   switch (receiver->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
@@ -9791,15 +9923,20 @@
         } else if (receiver->HasElement(j)) {
           // Call GetElement on receiver, not its prototype, or getters won't
           // have the correct receiver.
-          element_value = GetElement(receiver, j);
-          if (element_value.is_null()) return false;
+          element_value = Object::GetElement(receiver, j);
+          RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
           visitor->visit(j, element_value);
         }
       }
       break;
     }
+    case FAST_DOUBLE_ELEMENTS: {
+      // TODO(1810): Decide if it's worthwhile to implement this.
+      UNREACHABLE();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
-      Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
+      Handle<NumberDictionary> dict(receiver->element_dictionary());
       List<uint32_t> indices(dict->Capacity() / 2);
       // Collect all indices in the object and the prototypes less
       // than length. This might introduce duplicates in the indices list.
@@ -9810,8 +9947,8 @@
       while (j < n) {
         HandleScope loop_scope;
         uint32_t index = indices[j];
-        Handle<Object> element = GetElement(receiver, index);
-        if (element.is_null()) return false;
+        Handle<Object> element = Object::GetElement(receiver, index);
+        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
         visitor->visit(index, element);
         // Skip to next different index (i.e., omit duplicates).
         do {
@@ -9908,6 +10045,13 @@
       uint32_t element_estimate;
       if (obj->IsJSArray()) {
         Handle<JSArray> array(Handle<JSArray>::cast(obj));
+        // TODO(1810): Find out if it's worthwhile to properly support
+        // arbitrary ElementsKinds. For now, pessimistically transition to
+        // FAST_ELEMENTS.
+        if (array->HasFastDoubleElements()) {
+          array = Handle<JSArray>::cast(
+              TransitionElementsKind(array, FAST_ELEMENTS));
+        }
         length_estimate =
             static_cast<uint32_t>(array->length()->Number());
         element_estimate =
@@ -9948,7 +10092,7 @@
     uint32_t at_least_space_for = estimate_nof_elements +
                                   (estimate_nof_elements >> 2);
     storage = Handle<FixedArray>::cast(
-        isolate->factory()->NewSeededNumberDictionary(at_least_space_for));
+        isolate->factory()->NewNumberDictionary(at_least_space_for));
   }
 
   ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -10005,15 +10149,17 @@
   CONVERT_CHECKED(JSArray, to, args[1]);
   FixedArrayBase* new_elements = from->elements();
   MaybeObject* maybe_new_map;
+  ElementsKind elements_kind;
   if (new_elements->map() == isolate->heap()->fixed_array_map() ||
       new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
-    maybe_new_map = to->map()->GetFastElementsMap();
+    elements_kind = FAST_ELEMENTS;
   } else if (new_elements->map() ==
              isolate->heap()->fixed_double_array_map()) {
-    maybe_new_map = to->map()->GetFastDoubleElementsMap();
+    elements_kind = FAST_DOUBLE_ELEMENTS;
   } else {
-    maybe_new_map = to->map()->GetSlowElementsMap();
+    elements_kind = DICTIONARY_ELEMENTS;
   }
+  maybe_new_map = to->GetElementsTransitionMap(elements_kind);
   Object* new_map;
   if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
   to->set_map(Map::cast(new_map));
@@ -10034,8 +10180,7 @@
   CONVERT_CHECKED(JSObject, object, args[0]);
   HeapObject* elements = object->elements();
   if (elements->IsDictionary()) {
-    int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
-    return Smi::FromInt(result);
+    return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
   } else if (object->IsJSArray()) {
     return JSArray::cast(object)->length();
   } else {
@@ -10060,9 +10205,9 @@
   }
 
   Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
-  Handle<Object> tmp1 = GetElement(jsobject, index1);
+  Handle<Object> tmp1 = Object::GetElement(jsobject, index1);
   RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
-  Handle<Object> tmp2 = GetElement(jsobject, index2);
+  Handle<Object> tmp2 = Object::GetElement(jsobject, index2);
   RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
 
   RETURN_IF_EMPTY_HANDLE(isolate,
@@ -10087,7 +10232,11 @@
   if (array->elements()->IsDictionary()) {
     // Create an array and get all the keys into it, then remove all the
     // keys that are not integers in the range 0 to length-1.
-    Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
+    bool threw = false;
+    Handle<FixedArray> keys =
+        GetKeysInFixedArrayFor(array, INCLUDE_PROTOS, &threw);
+    if (threw) return Failure::Exception();
+
     int keys_length = keys->length();
     for (int i = 0; i < keys_length; i++) {
       Object* key = keys->get(i);
@@ -10099,7 +10248,9 @@
     }
     return *isolate->factory()->NewJSArrayWithElements(keys);
   } else {
-    ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
+    ASSERT(array->HasFastElements() ||
+           array->HasFastSmiOnlyElements() ||
+           array->HasFastDoubleElements());
     Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
     // -1 means start of array.
     single_interval->set(0, Smi::FromInt(-1));
@@ -10218,8 +10369,8 @@
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
       if (structure->IsForeign() || structure->IsAccessorInfo()) {
-        MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
-            receiver, structure, name, result->holder());
+        MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
+            receiver, structure, name);
         if (!maybe_value->ToObject(&value)) {
           if (maybe_value->IsRetryAfterGC()) return maybe_value;
           ASSERT(maybe_value->IsException());
@@ -10241,10 +10392,11 @@
     case CONSTANT_TRANSITION:
     case NULL_DESCRIPTOR:
       return heap->undefined_value();
-    default:
+    case HANDLER:
       UNREACHABLE();
+      return heap->undefined_value();
   }
-  UNREACHABLE();
+  UNREACHABLE();  // keep the compiler happy
   return heap->undefined_value();
 }
 
@@ -10310,7 +10462,7 @@
   // Try local lookup on each of the objects.
   Handle<JSObject> jsproto = obj;
   for (int i = 0; i < length; i++) {
-    LookupResult result;
+    LookupResult result(isolate);
     jsproto->LocalLookup(*name, &result);
     if (result.IsProperty()) {
       // LookupResult is not GC safe as it holds raw object pointers.
@@ -10367,7 +10519,7 @@
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
   CONVERT_ARG_CHECKED(String, name, 1);
 
-  LookupResult result;
+  LookupResult result(isolate);
   obj->Lookup(*name, &result);
   if (result.IsProperty()) {
     return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
@@ -10551,6 +10703,18 @@
 static const int kFrameDetailsFlagsIndex = 8;
 static const int kFrameDetailsFirstDynamicIndex = 9;
 
+
+static SaveContext* FindSavedContextForFrame(Isolate* isolate,
+                                             JavaScriptFrame* frame) {
+  SaveContext* save = isolate->save_context();
+  while (save != NULL && !save->IsBelowFrame(frame)) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
+  return save;
+}
+
+
 // Return an array with frame details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -10606,11 +10770,7 @@
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->below(it.frame())) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
+  SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
 
   // Get the frame id.
   Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
@@ -10627,9 +10787,8 @@
   // Get scope info and read from it for local variable information.
   Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<SerializedScopeInfo> scope_info(shared->scope_info());
-  ASSERT(*scope_info != SerializedScopeInfo::Empty());
-  ScopeInfo<> info(*scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
+  ASSERT(*scope_info != ScopeInfo::Empty());
 
   // Get the locals names and values into a temporary array.
   //
   // TODO(1240907): Hide compiler-introduced stack variables
   // (e.g. .result)?  For users of the debugger, they will probably be
   // (e.g. .result)?  For users of the debugger, they will probably be
   // confusing.
   Handle<FixedArray> locals =
-      isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
+      isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
 
   // Fill in the values of the locals.
   int i = 0;
-  for (; i < info.number_of_stack_slots(); ++i) {
+  for (; i < scope_info->StackLocalCount(); ++i) {
     // Use the value from the stack.
-    locals->set(i * 2, *info.LocalName(i));
+    locals->set(i * 2, scope_info->LocalName(i));
     locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
   }
-  if (i < info.NumberOfLocals()) {
+  if (i < scope_info->LocalCount()) {
     // Get the context containing declarations.
     Handle<Context> context(
         Context::cast(it.frame()->context())->declaration_context());
-    for (; i < info.NumberOfLocals(); ++i) {
-      Handle<String> name = info.LocalName(i);
+    for (; i < scope_info->LocalCount(); ++i) {
+      Handle<String> name(scope_info->LocalName(i));
+      VariableMode mode;
+      InitializationFlag init_flag;
       locals->set(i * 2, *name);
-      locals->set(i * 2 + 1,
-                  context->get(scope_info->ContextSlotIndex(*name, NULL)));
+      locals->set(i * 2 + 1, context->get(
+          scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
     }
   }
 
@@ -10708,7 +10869,7 @@
 
   // Find the number of arguments to fill. At least fill the number of
   // parameters for the function and fill more if more parameters are provided.
-  int argument_count = info.number_of_parameters();
+  int argument_count = scope_info->ParameterCount();
   if (argument_count < frame_inspector.GetParametersCount()) {
     argument_count = frame_inspector.GetParametersCount();
   }
@@ -10720,7 +10881,7 @@
 
   // Calculate the size of the result.
   int details_size = kFrameDetailsFirstDynamicIndex +
-                     2 * (argument_count + info.NumberOfLocals()) +
+                     2 * (argument_count + scope_info->LocalCount()) +
                      (at_return ? 1 : 0);
   Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
 
@@ -10735,7 +10896,7 @@
 
   // Add the locals count
   details->set(kFrameDetailsLocalCountIndex,
-               Smi::FromInt(info.NumberOfLocals()));
+               Smi::FromInt(scope_info->LocalCount()));
 
   // Add the source position.
   if (position != RelocInfo::kNoPosition) {
@@ -10770,8 +10931,8 @@
   // Add arguments name and value.
   for (int i = 0; i < argument_count; i++) {
     // Name of the argument.
-    if (i < info.number_of_parameters()) {
-      details->set(details_index++, *info.parameter_name(i));
+    if (i < scope_info->ParameterCount()) {
+      details->set(details_index++, scope_info->ParameterName(i));
     } else {
       details->set(details_index++, heap->undefined_value());
     }
@@ -10786,7 +10947,7 @@
   }
 
   // Add locals name and value from the temporary copy from the function frame.
-  for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
+  for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
     details->set(details_index++, locals->get(i));
   }
 
@@ -10799,7 +10960,9 @@
   // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
   // THE FRAME ITERATOR TO WRAP THE RECEIVER.
   Handle<Object> receiver(it.frame()->receiver(), isolate);
-  if (!receiver->IsJSObject() && !shared->strict_mode() && !shared->native()) {
+  if (!receiver->IsJSObject() &&
+      shared->is_classic_mode() &&
+      !shared->native()) {
     // If the receiver is not a JSObject and the function is not a
     // builtin or strict-mode we have hit an optimization where a
     // value object is not converted into a wrapped JS object. To
@@ -10822,21 +10985,20 @@
 // Copy all the context locals into an object used to materialize a scope.
 static bool CopyContextLocalsToScopeObject(
     Isolate* isolate,
-    Handle<SerializedScopeInfo> serialized_scope_info,
-    ScopeInfo<>& scope_info,
+    Handle<ScopeInfo> scope_info,
     Handle<Context> context,
     Handle<JSObject> scope_object) {
   // Fill all context locals to the context extension.
-  for (int i = Context::MIN_CONTEXT_SLOTS;
-       i < scope_info.number_of_context_slots();
-       i++) {
-    int context_index = serialized_scope_info->ContextSlotIndex(
-        *scope_info.context_slot_name(i), NULL);
+  for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+    VariableMode mode;
+    InitializationFlag init_flag;
+    int context_index = scope_info->ContextSlotIndex(
+        scope_info->ContextLocalName(i), &mode, &init_flag);
 
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(scope_object,
-                    scope_info.context_slot_name(i),
+                    Handle<String>(scope_info->ContextLocalName(i)),
                     Handle<Object>(context->get(context_index), isolate),
                     NONE,
                     kNonStrictMode),
@@ -10855,8 +11017,7 @@
     int inlined_frame_index) {
   Handle<JSFunction> function(JSFunction::cast(frame->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
   FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
@@ -10865,11 +11026,11 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // First fill all parameters.
-  for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    scope_info.parameter_name(i),
+                    Handle<String>(scope_info->ParameterName(i)),
                     Handle<Object>(frame_inspector.GetParameter(i)),
                     NONE,
                     kNonStrictMode),
@@ -10877,24 +11038,23 @@
   }
 
   // Second fill all stack locals.
-  for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
+  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    scope_info.stack_slot_name(i),
+                    Handle<String>(scope_info->StackLocalName(i)),
                     Handle<Object>(frame_inspector.GetExpression(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
-  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+  if (scope_info->HasContext()) {
     // Third fill all context locals.
     Handle<Context> frame_context(Context::cast(frame->context()));
     Handle<Context> function_context(frame_context->declaration_context());
-    if (!CopyContextLocalsToScopeObject(isolate,
-                                        serialized_scope_info, scope_info,
-                                        function_context, local_scope)) {
+    if (!CopyContextLocalsToScopeObject(
+            isolate, scope_info, function_context, local_scope)) {
       return Handle<JSObject>();
     }
 
@@ -10904,7 +11064,11 @@
       if (function_context->has_extension() &&
           !function_context->IsGlobalContext()) {
         Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-        Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+        bool threw = false;
+        Handle<FixedArray> keys =
+            GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+        if (threw) return Handle<JSObject>();
+
         for (int i = 0; i < keys->length(); i++) {
           // Names of variables introduced by eval are strings.
           ASSERT(keys->get(i)->IsString());
@@ -10933,8 +11097,7 @@
   ASSERT(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(shared->scope_info());
 
   // Allocate and initialize a JSObject with all the content of this function
   // closure.
@@ -10942,9 +11105,8 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals to the context extension.
-  if (!CopyContextLocalsToScopeObject(isolate,
-                                      serialized_scope_info, scope_info,
-                                      context, closure_scope)) {
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, closure_scope)) {
     return Handle<JSObject>();
   }
 
@@ -10952,7 +11114,11 @@
   // be variables introduced by eval.
   if (context->has_extension()) {
     Handle<JSObject> ext(JSObject::cast(context->extension()));
-    Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+    bool threw = false;
+    Handle<FixedArray> keys =
+        GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
+    if (threw) return Handle<JSObject>();
+
     for (int i = 0; i < keys->length(); i++) {
       // Names of variables introduced by eval are strings.
       ASSERT(keys->get(i)->IsString());
@@ -10995,9 +11161,7 @@
     Isolate* isolate,
     Handle<Context> context) {
   ASSERT(context->IsBlockContext());
-  Handle<SerializedScopeInfo> serialized_scope_info(
-      SerializedScopeInfo::cast(context->extension()));
-  ScopeInfo<> scope_info(*serialized_scope_info);
+  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -11005,21 +11169,19 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals.
-  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
-    if (!CopyContextLocalsToScopeObject(isolate,
-                                        serialized_scope_info, scope_info,
-                                        context, block_scope)) {
-      return Handle<JSObject>();
-    }
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, block_scope)) {
+    return Handle<JSObject>();
   }
 
   return block_scope;
 }
 
 
-// Iterate over the actual scopes visible from a stack frame. All scopes are
+// Iterate over the actual scopes visible from a stack frame. The iteration
+// proceeds from the innermost visible nested scope outwards. All scopes are
 // backed by an actual context except the local scope, which is inserted
-// "artifically" in the context chain.
+// "artificially" in the context chain.
 class ScopeIterator {
  public:
   enum ScopeType {
@@ -11039,27 +11201,83 @@
       inlined_frame_index_(inlined_frame_index),
       function_(JSFunction::cast(frame->function())),
       context_(Context::cast(frame->context())),
-      local_done_(false),
-      at_local_(false) {
+      nested_scope_chain_(4) {
 
-    // Check whether the first scope is actually a local scope.
-    // If there is a stack slot for .result then this local scope has been
-    // created for evaluating top level code and it is not a real local scope.
-    // Checking for the existence of .result seems fragile, but the scope info
-    // saved with the code object does not otherwise have that information.
-    int index = function_->shared()->scope_info()->
-        StackSlotIndex(isolate_->heap()->result_symbol());
-    if (index >= 0) {
-      local_done_ = true;
-    } else if (context_->IsGlobalContext() ||
-               context_->IsFunctionContext()) {
-      at_local_ = true;
-    } else if (context_->closure() != *function_) {
-      // The context_ is a block or with or catch block from the outer function.
-      ASSERT(context_->IsWithContext() ||
-             context_->IsCatchContext() ||
-             context_->IsBlockContext());
-      at_local_ = true;
+    // Catch the case when the debugger stops in an internal function.
+    Handle<SharedFunctionInfo> shared_info(function_->shared());
+    Handle<ScopeInfo> scope_info(shared_info->scope_info());
+    if (shared_info->script() == isolate->heap()->undefined_value()) {
+      while (context_->closure() == *function_) {
+        context_ = Handle<Context>(context_->previous(), isolate_);
+      }
+      return;
+    }
+
+    // Get the debug info (create it if it does not exist).
+    if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
+      // Return if ensuring debug info failed.
+      return;
+    }
+    Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
+
+    // Find the break point where execution has stopped.
+    BreakLocationIterator break_location_iterator(debug_info,
+                                                  ALL_BREAK_LOCATIONS);
+    break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+    if (break_location_iterator.IsExit()) {
+      // We are within the return sequence. At the moment it is not possible to
+      // get a source position which is consistent with the current scope chain.
+      // Thus all nested with, catch and block contexts are skipped and we only
+      // provide the function scope.
+      if (scope_info->HasContext()) {
+        context_ = Handle<Context>(context_->declaration_context(), isolate_);
+      } else {
+        while (context_->closure() == *function_) {
+          context_ = Handle<Context>(context_->previous(), isolate_);
+        }
+      }
+      if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
+    } else {
+      // Reparse the code and analyze the scopes.
+      ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+      Handle<Script> script(Script::cast(shared_info->script()));
+      Scope* scope = NULL;
+
+      // Check whether we are in global, eval or function code.
+      Handle<ScopeInfo> scope_info(shared_info->scope_info());
+      if (scope_info->Type() != FUNCTION_SCOPE) {
+        // Global or eval code.
+        CompilationInfo info(script);
+        if (scope_info->Type() == GLOBAL_SCOPE) {
+          info.MarkAsGlobal();
+        } else {
+          ASSERT(scope_info->Type() == EVAL_SCOPE);
+          info.MarkAsEval();
+          info.SetCallingContext(Handle<Context>(function_->context()));
+        }
+        if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+          scope = info.function()->scope();
+        }
+      } else {
+        // Function code
+        CompilationInfo info(shared_info);
+        if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+          scope = info.function()->scope();
+        }
+      }
+
+      // Retrieve the scope chain for the current position.
+      if (scope != NULL) {
+        int source_position = shared_info->code()->SourcePosition(frame_->pc());
+        scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+      } else {
+        // A failed reparse indicates that the preparser has diverged from the
+        // parser, or that the preparse data given to the initial parse was
+        // faulty. We fail hard in debug mode; in release mode we only provide
+        // the information we get from the context chain, with nothing about
+        // completely stack-allocated scopes or stack-allocated locals.
+        UNREACHABLE();
+      }
     }
   }
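
Note: with the reparse-based constructor above, scopes are reported
innermost first. A sketch of the chain seen when stopped at the debugger
statement:

        function outer() {
          var local = 1;
          with ({ w: 2 }) {
            try { throw 3; } catch (e) {
              debugger;  // scopes: catch, with, local, global
            }
          }
        }
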
 
@@ -11068,40 +11286,49 @@
 
   // Move to the next scope.
   void Next() {
-    // If at a local scope mark the local scope as passed.
-    if (at_local_) {
-      at_local_ = false;
-      local_done_ = true;
-
-      // If the current context is not associated with the local scope the
-      // current context is the next real scope, so don't move to the next
-      // context in this case.
-      if (context_->closure() != *function_) {
-        return;
-      }
-    }
-
-    // The global scope is always the last in the chain.
-    if (context_->IsGlobalContext()) {
+    ScopeType scope_type = Type();
+    if (scope_type == ScopeTypeGlobal) {
+      // The global scope is always the last in the chain.
+      ASSERT(context_->IsGlobalContext());
       context_ = Handle<Context>();
       return;
     }
-
-    // Move to the next context.
-    context_ = Handle<Context>(context_->previous(), isolate_);
-
-    // If passing the local scope indicate that the current scope is now the
-    // local scope.
-    if (!local_done_ &&
-        (context_->IsGlobalContext() || context_->IsFunctionContext())) {
-      at_local_ = true;
+    if (nested_scope_chain_.is_empty()) {
+      context_ = Handle<Context>(context_->previous(), isolate_);
+    } else {
+      if (nested_scope_chain_.last()->HasContext()) {
+        ASSERT(context_->previous() != NULL);
+        context_ = Handle<Context>(context_->previous(), isolate_);
+      }
+      nested_scope_chain_.RemoveLast();
     }
   }
 
   // Return the type of the current scope.
   ScopeType Type() {
-    if (at_local_) {
-      return ScopeTypeLocal;
+    if (!nested_scope_chain_.is_empty()) {
+      Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+      switch (scope_info->Type()) {
+        case FUNCTION_SCOPE:
+          ASSERT(context_->IsFunctionContext() ||
+                 !scope_info->HasContext());
+          return ScopeTypeLocal;
+        case GLOBAL_SCOPE:
+          ASSERT(context_->IsGlobalContext());
+          return ScopeTypeGlobal;
+        case WITH_SCOPE:
+          ASSERT(context_->IsWithContext());
+          return ScopeTypeWith;
+        case CATCH_SCOPE:
+          ASSERT(context_->IsCatchContext());
+          return ScopeTypeCatch;
+        case BLOCK_SCOPE:
+          ASSERT(!scope_info->HasContext() ||
+                 context_->IsBlockContext());
+          return ScopeTypeBlock;
+        case EVAL_SCOPE:
+          UNREACHABLE();
+      }
     }
     if (context_->IsGlobalContext()) {
       ASSERT(context_->global()->IsGlobalObject());
@@ -11127,6 +11354,7 @@
         return Handle<JSObject>(CurrentContext()->global());
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
+        ASSERT(nested_scope_chain_.length() == 1);
         return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
       case ScopeIterator::ScopeTypeWith:
         // Return the with object.
@@ -11143,13 +11371,28 @@
     return Handle<JSObject>();
   }
 
+  Handle<ScopeInfo> CurrentScopeInfo() {
+    if (!nested_scope_chain_.is_empty()) {
+      return nested_scope_chain_.last();
+    } else if (context_->IsBlockContext()) {
+      return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
+    } else if (context_->IsFunctionContext()) {
+      return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
+    }
+    return Handle<ScopeInfo>::null();
+  }
+
   // Return the context for this scope. For the local context there might not
   // be an actual context.
   Handle<Context> CurrentContext() {
-    if (at_local_ && context_->closure() != *function_) {
+    if (Type() == ScopeTypeGlobal ||
+        nested_scope_chain_.is_empty()) {
+      return context_;
+    } else if (nested_scope_chain_.last()->HasContext()) {
+      return context_;
+    } else {
       return Handle<Context>();
     }
-    return context_;
   }
 
 #ifdef DEBUG
@@ -11163,8 +11406,7 @@
 
       case ScopeIterator::ScopeTypeLocal: {
         PrintF("Local:\n");
-        ScopeInfo<> scope_info(function_->shared()->scope_info());
-        scope_info.Print();
+        function_->shared()->scope_info()->Print();
         if (!CurrentContext().is_null()) {
           CurrentContext()->Print();
           if (CurrentContext()->has_extension()) {
@@ -11212,8 +11454,7 @@
   int inlined_frame_index_;
   Handle<JSFunction> function_;
   Handle<Context> context_;
-  bool local_done_;
-  bool at_local_;
+  List<Handle<ScopeInfo> > nested_scope_chain_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
@@ -11469,48 +11710,53 @@
   int target_start_position = RelocInfo::kNoPosition;
   Handle<SharedFunctionInfo> target;
   while (!done) {
-    HeapIterator iterator;
-    for (HeapObject* obj = iterator.next();
-         obj != NULL; obj = iterator.next()) {
-      if (obj->IsSharedFunctionInfo()) {
-        Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
-        if (shared->script() == *script) {
-          // If the SharedFunctionInfo found has the requested script data and
-          // contains the source position it is a candidate.
-          int start_position = shared->function_token_position();
-          if (start_position == RelocInfo::kNoPosition) {
-            start_position = shared->start_position();
-          }
-          if (start_position <= position &&
-              position <= shared->end_position()) {
-            // If there is no candidate or this function is within the current
-            // candidate this is the new candidate.
-            if (target.is_null()) {
-              target_start_position = start_position;
-              target = shared;
-            } else {
-              if (target_start_position == start_position &&
-                  shared->end_position() == target->end_position()) {
-                  // If a top-level function contain only one function
-                  // declartion the source for the top-level and the function is
-                  // the same. In that case prefer the non top-level function.
-                if (!shared->is_toplevel()) {
+    {  // Extra scope for iterator and no-allocation.
+      isolate->heap()->EnsureHeapIsIterable();
+      AssertNoAllocation no_alloc_during_heap_iteration;
+      HeapIterator iterator;
+      for (HeapObject* obj = iterator.next();
+           obj != NULL; obj = iterator.next()) {
+        if (obj->IsSharedFunctionInfo()) {
+          Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+          if (shared->script() == *script) {
+            // If the SharedFunctionInfo found has the requested script data and
+            // contains the source position it is a candidate.
+            int start_position = shared->function_token_position();
+            if (start_position == RelocInfo::kNoPosition) {
+              start_position = shared->start_position();
+            }
+            if (start_position <= position &&
+                position <= shared->end_position()) {
+              // If there is no candidate or this function is within the current
+              // candidate this is the new candidate.
+              if (target.is_null()) {
+                target_start_position = start_position;
+                target = shared;
+              } else {
+                if (target_start_position == start_position &&
+                    shared->end_position() == target->end_position()) {
+                    // If a top-level function contains only one function
+                    // declaration the source for the top-level and the
+                    // function is the same. In that case prefer the non
+                    // top-level function.
+                  if (!shared->is_toplevel()) {
+                    target_start_position = start_position;
+                    target = shared;
+                  }
+                } else if (target_start_position <= start_position &&
+                           shared->end_position() <= target->end_position()) {
+                  // This containment check includes equality as a function
+                  // inside a top-level function can share either start or end
+                  // position with the top-level function.
                   target_start_position = start_position;
                   target = shared;
                 }
-              } else if (target_start_position <= start_position &&
-                         shared->end_position() <= target->end_position()) {
-                // This containment check includes equality as a function inside
-                // a top-level function can share either start or end position
-                // with the top-level function.
-                target_start_position = start_position;
-                target = shared;
               }
             }
           }
         }
-      }
-    }
+      }  // End for loop.
+    }  // End no-allocation scope.
 
     if (target.is_null()) {
       return isolate->heap()->undefined_value();
@@ -11523,9 +11769,9 @@
     if (!done) {
       // If the candidate is not compiled compile it to reveal any inner
       // functions which might contain the requested source position.
-      CompileLazyShared(target, KEEP_EXCEPTION);
+      SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
     }
-  }
+  }  // End while loop.
 
   return *target;
 }
@@ -11671,46 +11917,65 @@
 
 // Creates a copy of the with context chain. The copy of the context chain is
 // linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Isolate* isolate,
-                                            Handle<JSFunction> function,
-                                            Handle<Context> current,
-                                            Handle<Context> base) {
-  // At the end of the chain. Return the base context to link to.
-  if (current->IsFunctionContext() || current->IsGlobalContext()) {
-    return base;
+static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
+                                                   Handle<JSFunction> function,
+                                                   Handle<Context> base,
+                                                   JavaScriptFrame* frame,
+                                                   int inlined_frame_index) {
+  HandleScope scope(isolate);
+  List<Handle<ScopeInfo> > scope_chain;
+  List<Handle<Context> > context_chain;
+
+  ScopeIterator it(isolate, frame, inlined_frame_index);
+  for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
+         it.Type() != ScopeIterator::ScopeTypeLocal; it.Next()) {
+    ASSERT(!it.Done());
+    scope_chain.Add(it.CurrentScopeInfo());
+    context_chain.Add(it.CurrentContext());
   }
 
-  // Recursively copy the with and catch contexts.
-  HandleScope scope(isolate);
-  Handle<Context> previous(current->previous());
-  Handle<Context> new_previous =
-      CopyWithContextChain(isolate, function, previous, base);
-  Handle<Context> new_current;
-  if (current->IsCatchContext()) {
-    Handle<String> name(String::cast(current->extension()));
-    Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
-    new_current =
-        isolate->factory()->NewCatchContext(function,
-                                            new_previous,
-                                            name,
-                                            thrown_object);
-  } else if (current->IsBlockContext()) {
-    Handle<SerializedScopeInfo> scope_info(
-        SerializedScopeInfo::cast(current->extension()));
-    new_current =
-        isolate->factory()->NewBlockContext(function, new_previous, scope_info);
-    // Copy context slots.
-    int num_context_slots = scope_info->NumberOfContextSlots();
-    for (int i = Context::MIN_CONTEXT_SLOTS; i < num_context_slots; ++i) {
-      new_current->set(i, current->get(i));
+  // At the end of the chain. Return the base context to link to.
+  Handle<Context> context = base;
+
+  // Iteratively copy and/or materialize the nested contexts.
+  while (!scope_chain.is_empty()) {
+    Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
+    Handle<Context> current = context_chain.RemoveLast();
+    ASSERT(!(scope_info->HasContext() && current.is_null()));
+
+    if (scope_info->Type() == CATCH_SCOPE) {
+      Handle<String> name(String::cast(current->extension()));
+      Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+      context =
+          isolate->factory()->NewCatchContext(function,
+                                              context,
+                                              name,
+                                              thrown_object);
+    } else if (scope_info->Type() == BLOCK_SCOPE) {
+      // Materialize the contents of the block scope into a JSObject.
+      Handle<JSObject> block_scope_object =
+          MaterializeBlockScope(isolate, current);
+      if (block_scope_object.is_null()) {
+        return Handle<Context>::null();
+      }
+      // Allocate a new function context for the debug evaluation and set the
+      // extension object.
+      Handle<Context> new_context =
+          isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
+                                                 function);
+      new_context->set_extension(*block_scope_object);
+      new_context->set_previous(*context);
+      context = new_context;
+    } else {
+      ASSERT(scope_info->Type() == WITH_SCOPE);
+      ASSERT(current->IsWithContext());
+      Handle<JSObject> extension(JSObject::cast(current->extension()));
+      context =
+          isolate->factory()->NewWithContext(function, context, extension);
     }
-  } else {
-    ASSERT(current->IsWithContext());
-    Handle<JSObject> extension(JSObject::cast(current->extension()));
-    new_current =
-        isolate->factory()->NewWithContext(function, new_previous, extension);
   }
-  return scope.CloseAndEscape(new_current);
+
+  return scope.CloseAndEscape(context);
 }
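
The rewrite above trades the old recursion for two explicit phases: walk the scope iterator from the inside out to collect the (scope info, context) pairs, then pop them back off, copying each onto the new chain so the rebuilt chain bottoms out at the supplied base. A rough sketch of that collect-then-rebuild shape with toy types, not V8's handle machinery:

#include <iostream>
#include <string>
#include <vector>

// Toy context: a label plus a link to the enclosing (outer) context.
struct Ctx {
  std::string label;
  const Ctx* previous;
};

// 'collected' holds scope labels innermost-first, as an iterator would
// produce them. Pop the outermost first and relink a copy of each onto
// the new base, ending with the innermost on top.
const Ctx* CopyChain(std::vector<std::string> collected, const Ctx* base,
                     std::vector<Ctx>* arena) {
  const Ctx* context = base;
  while (!collected.empty()) {
    arena->push_back(Ctx{collected.back(), context});
    collected.pop_back();
    context = &arena->back();
  }
  return context;  // innermost context of the rebuilt chain
}

int main() {
  std::vector<Ctx> arena;
  arena.reserve(8);  // keep element pointers stable for this sketch
  Ctx base{"function", nullptr};
  const Ctx* top = CopyChain({"block", "catch", "with"}, &base, &arena);
  for (const Ctx* c = top; c != nullptr; c = c->previous)
    std::cout << c->label << '\n';  // block, catch, with, function
}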
 
 
@@ -11720,23 +11985,24 @@
                                          JavaScriptFrame* frame,
                                          int inlined_frame_index,
                                          Handle<JSFunction> function,
-                                         Handle<SerializedScopeInfo> scope_info,
-                                         const ScopeInfo<>* sinfo,
+                                         Handle<ScopeInfo> scope_info,
                                          Handle<Context> function_context) {
   // Try to find the value of 'arguments' to pass as parameter. If it is not
   // found (that is the debugged function does not reference 'arguments' and
   // does not support eval) then create an 'arguments' object.
   int index;
-  if (sinfo->number_of_stack_slots() > 0) {
+  if (scope_info->StackLocalCount() > 0) {
     index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
     if (index != -1) {
       return Handle<Object>(frame->GetExpression(index), isolate);
     }
   }
 
-  if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
-    index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
-                                         NULL);
+  if (scope_info->HasHeapAllocatedLocals()) {
+    VariableMode mode;
+    InitializationFlag init_flag;
+    index = scope_info->ContextSlotIndex(
+        isolate->heap()->arguments_symbol(), &mode, &init_flag);
     if (index != -1) {
       return Handle<Object>(function_context->get(index), isolate);
     }
@@ -11801,16 +12067,12 @@
   JavaScriptFrameIterator it(isolate, id);
   JavaScriptFrame* frame = it.frame();
   Handle<JSFunction> function(JSFunction::cast(frame->function()));
-  Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
-  ScopeInfo<> sinfo(*scope_info);
+  Handle<ScopeInfo> scope_info(function->shared()->scope_info());
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->below(frame)) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
+  SaveContext* save = FindSavedContextForFrame(isolate, frame);
+
   SaveContext savex(isolate);
   isolate->set_context(*(save->context()));
 
@@ -11825,9 +12087,9 @@
                                       isolate->factory()->undefined_value());
   go_between->set_context(function->context());
 #ifdef DEBUG
-  ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
-  ASSERT(go_between_sinfo.number_of_parameters() == 0);
-  ASSERT(go_between_sinfo.number_of_context_slots() == 0);
+  Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
+  ASSERT(go_between_scope_info->ParameterCount() == 0);
+  ASSERT(go_between_scope_info->ContextLocalCount() == 0);
 #endif
 
   // Materialize the content of the local scope into a JSObject.
@@ -11845,10 +12107,14 @@
   Handle<Context> frame_context(Context::cast(frame->context()));
   Handle<Context> function_context;
   // Get the function's context if it has one.
-  if (scope_info->HasHeapAllocatedLocals()) {
+  if (scope_info->HasContext()) {
     function_context = Handle<Context>(frame_context->declaration_context());
   }
-  context = CopyWithContextChain(isolate, go_between, frame_context, context);
+  context = CopyNestedScopeContextChain(isolate,
+                                        go_between,
+                                        context,
+                                        frame,
+                                        inlined_frame_index);
 
   if (additional_context->IsJSObject()) {
     Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@@ -11872,7 +12138,8 @@
       Compiler::CompileEval(function_source,
                             context,
                             context->IsGlobalContext(),
-                            kNonStrictMode);
+                            CLASSIC_MODE,
+                            RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
@@ -11886,17 +12153,20 @@
   if (has_pending_exception) return Failure::Exception();
 
   Handle<Object> arguments = GetArgumentsObject(isolate,
-                                                frame, inlined_frame_index,
-                                                function, scope_info,
-                                                &sinfo, function_context);
+                                                frame,
+                                                inlined_frame_index,
+                                                function,
+                                                scope_info,
+                                                function_context);
 
   // Invoke the evaluation function and return the result.
-  const int argc = 2;
-  Object** argv[argc] = { arguments.location(),
-                          Handle<Object>::cast(source).location() };
+  Handle<Object> argv[] = { arguments, source };
   Handle<Object> result =
-      Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
-                      argc, argv, &has_pending_exception);
+      Execution::Call(Handle<JSFunction>::cast(evaluation_function),
+                      receiver,
+                      ARRAY_SIZE(argv),
+                      argv,
+                      &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
 
   // Skip the global proxy as it has no properties and always delegates to the
@@ -11946,15 +12216,12 @@
   bool is_global = true;
 
   if (additional_context->IsJSObject()) {
-    // Create a function context first, than put 'with' context on top of it.
-    Handle<JSFunction> go_between = isolate->factory()->NewFunction(
-        isolate->factory()->empty_string(),
-        isolate->factory()->undefined_value());
-    go_between->set_context(*context);
-    context =
-        isolate->factory()->NewFunctionContext(
-            Context::MIN_CONTEXT_SLOTS, go_between);
-    context->set_extension(JSObject::cast(*additional_context));
+    // Create a new with context with the additional context information between
+    // the context of the debugged function and the eval code to be executed.
+    context = isolate->factory()->NewWithContext(
+        Handle<JSFunction>(context->closure()),
+        context,
+        Handle<JSObject>::cast(additional_context));
     is_global = false;
   }
 
@@ -11962,7 +12229,11 @@
   // Currently, the eval code will be executed in non-strict mode,
   // even in the strict code context.
   Handle<SharedFunctionInfo> shared =
-      Compiler::CompileEval(source, context, is_global, kNonStrictMode);
+      Compiler::CompileEval(source,
+                            context,
+                            is_global,
+                            CLASSIC_MODE,
+                            RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(
@@ -11975,6 +12246,8 @@
   Handle<Object> result =
     Execution::Call(compiled_function, receiver, 0, NULL,
                     &has_pending_exception);
+  // Clear the oneshot breakpoints so that the debugger does not step further.
+  isolate->debug()->ClearStepping();
   if (has_pending_exception) return Failure::Exception();
   return *result;
 }
@@ -12002,13 +12275,14 @@
   // Return result as a JS array.
   Handle<JSObject> result =
       isolate->factory()->NewJSObject(isolate->array_function());
-  Handle<JSArray>::cast(result)->SetContent(*instances);
+  isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
   return *result;
 }
 
 
 // Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(JSObject* target,
+static int DebugReferencedBy(HeapIterator* iterator,
+                             JSObject* target,
                              Object* instance_filter, int max_references,
                              FixedArray* instances, int instances_size,
                              JSFunction* arguments_function) {
@@ -12018,9 +12292,8 @@
   // Iterate the heap.
   int count = 0;
   JSObject* last = NULL;
-  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator.next()) != NULL) &&
+  while (((heap_obj = iterator->next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12085,7 +12358,11 @@
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
-  isolate->heap()->CollectAllGarbage(false);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  // The heap iterator reserves the right to do a GC to make the heap iterable.
+  // Due to the GC above we know it won't need to do that, but it seems cleaner
+  // to get the heap iterator constructed before we start having raw
+  // Object* locals that are not protected by handles.
 
   // Check parameters.
   CONVERT_CHECKED(JSObject, target, args[0]);
@@ -12095,6 +12372,7 @@
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
   RUNTIME_ASSERT(max_references >= 0);
 
+
   // Get the constructor function for context extension and arguments array.
   JSObject* arguments_boilerplate =
       isolate->context()->global_context()->arguments_boilerplate();
@@ -12103,7 +12381,9 @@
 
   // Get the number of referencing objects.
   int count;
-  count = DebugReferencedBy(target, instance_filter, max_references,
+  HeapIterator heap_iterator;
+  count = DebugReferencedBy(&heap_iterator,
+                            target, instance_filter, max_references,
                             NULL, 0, arguments_function);
 
   // Allocate an array to hold the result.
@@ -12114,30 +12394,34 @@
   FixedArray* instances = FixedArray::cast(object);
 
   // Fill the referencing objects.
-  count = DebugReferencedBy(target, instance_filter, max_references,
+  // AllocateFixedArray above does not make the heap non-iterable.
+  ASSERT(HEAP->IsHeapIterable());
+  HeapIterator heap_iterator2;
+  count = DebugReferencedBy(&heap_iterator2,
+                            target, instance_filter, max_references,
                             instances, count, arguments_function);
 
   // Return result as JS array.
   Object* result;
-  { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+  MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
       isolate->context()->global_context()->array_function());
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  JSArray::cast(result)->SetContent(instances);
-  return result;
+  if (!maybe_result->ToObject(&result)) return maybe_result;
+  return JSArray::cast(result)->SetContent(instances);
 }
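
This helper and DebugConstructedBy below now borrow the HeapIterator from the caller and keep the same two-pass shape: a counting pass with no output buffer, an exact-size allocation, then a filling pass over a freshly constructed iterator. The pattern in isolation, with plain integers standing in for heap objects (names and predicate are illustrative only):

#include <iostream>
#include <vector>

// One routine serves both passes: with out == nullptr it only counts;
// with a buffer it also records up to out_size matches.
static int CountOrFill(const std::vector<int>& heap, bool (*pred)(int),
                       int* out, int out_size) {
  int count = 0;
  for (size_t i = 0; i < heap.size(); ++i) {
    if (!pred(heap[i])) continue;
    if (out != nullptr && count < out_size) out[count] = heap[i];
    ++count;
  }
  return count;
}

static bool IsEven(int v) { return v % 2 == 0; }

int main() {
  std::vector<int> heap = {1, 4, 7, 10, 13};
  int n = CountOrFill(heap, IsEven, nullptr, 0);  // pass 1: count only
  std::vector<int> found(n);
  CountOrFill(heap, IsEven, &found[0], n);        // pass 2: fill
  std::cout << n << " matches\n";                 // 2 matches
}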
 
 
 // Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(JSFunction* constructor, int max_references,
-                              FixedArray* instances, int instances_size) {
+static int DebugConstructedBy(HeapIterator* iterator,
+                              JSFunction* constructor,
+                              int max_references,
+                              FixedArray* instances,
+                              int instances_size) {
   AssertNoAllocation no_alloc;
 
   // Iterate the heap.
   int count = 0;
-  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator.next()) != NULL) &&
+  while (((heap_obj = iterator->next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12165,7 +12449,7 @@
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  isolate->heap()->CollectAllGarbage(false);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   // Check parameters.
   CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -12174,7 +12458,12 @@
 
   // Get the number of referencing objects.
   int count;
-  count = DebugConstructedBy(constructor, max_references, NULL, 0);
+  HeapIterator heap_iterator;
+  count = DebugConstructedBy(&heap_iterator,
+                             constructor,
+                             max_references,
+                             NULL,
+                             0);
 
   // Allocate an array to hold the result.
   Object* object;
@@ -12183,8 +12472,14 @@
   }
   FixedArray* instances = FixedArray::cast(object);
 
+  ASSERT(HEAP->IsHeapIterable());
   // Fill the referencing objects.
-  count = DebugConstructedBy(constructor, max_references, instances, count);
+  HeapIterator heap_iterator2;
+  count = DebugConstructedBy(&heap_iterator2,
+                             constructor,
+                             max_references,
+                             instances,
+                             count);
 
   // Return result as JS array.
   Object* result;
@@ -12192,8 +12487,7 @@
           isolate->context()->global_context()->array_function());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  JSArray::cast(result)->SetContent(instances);
-  return result;
+  return JSArray::cast(result)->SetContent(instances);
 }
 
 
@@ -12223,7 +12517,7 @@
   // Get the function and make sure it is compiled.
   CONVERT_ARG_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
-  if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+  if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->code()->PrintLn();
@@ -12239,7 +12533,7 @@
   // Get the function and make sure it is compiled.
   CONVERT_ARG_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
-  if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
+  if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   shared->construct_stub()->PrintLn();
@@ -12257,14 +12551,15 @@
 }
 
 
-static int FindSharedFunctionInfosForScript(Script* script,
+static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
+                                            Script* script,
                                             FixedArray* buffer) {
   AssertNoAllocation no_allocations;
-
   int counter = 0;
   int buffer_size = buffer->length();
-  HeapIterator iterator;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+  for (HeapObject* obj = iterator->next();
+       obj != NULL;
+       obj = iterator->next()) {
     ASSERT(obj != NULL);
     if (!obj->IsSharedFunctionInfo()) {
       continue;
@@ -12290,16 +12585,30 @@
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, script_value, args[0]);
 
+
   Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
 
   const int kBufferSize = 32;
 
   Handle<FixedArray> array;
   array = isolate->factory()->NewFixedArray(kBufferSize);
-  int number = FindSharedFunctionInfosForScript(*script, *array);
+  int number;
+  {
+    isolate->heap()->EnsureHeapIsIterable();
+    AssertNoAllocation no_allocations;
+    HeapIterator heap_iterator;
+    Script* scr = *script;
+    FixedArray* arr = *array;
+    number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+  }
   if (number > kBufferSize) {
     array = isolate->factory()->NewFixedArray(number);
-    FindSharedFunctionInfosForScript(*script, *array);
+    isolate->heap()->EnsureHeapIsIterable();
+    AssertNoAllocation no_allocations;
+    HeapIterator heap_iterator;
+    Script* scr = *script;
+    FixedArray* arr = *array;
+    FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
   }
 
   Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
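
The resize path above is a guess-then-retry scan: one pass into a fixed 32-slot buffer and, only when the real count overflows it, a second pass into an exactly sized array, with heap iterability re-established before each pass. The sizing idiom in isolation, over plain integers (types, names, and the match predicate are illustrative, not V8's heap API):

#include <vector>

// Scan returns the true match count even when the buffer is too small,
// recording only as many matches as fit.
static int Scan(const std::vector<int>& heap, std::vector<int>* buffer) {
  int n = 0;
  for (size_t i = 0; i < heap.size(); ++i) {
    if (heap[i] % 2 == 0) {  // stand-in match predicate
      if (n < static_cast<int>(buffer->size())) (*buffer)[n] = heap[i];
      ++n;
    }
  }
  return n;
}

std::vector<int> Collect(const std::vector<int>& heap) {
  const int kBufferSize = 32;  // optimistic first guess
  std::vector<int> buffer(kBufferSize);
  int number = Scan(heap, &buffer);
  if (number > kBufferSize) {  // rare: retry with the exact size
    buffer.assign(number, 0);
    Scan(heap, &buffer);
  }
  buffer.resize(number);
  return buffer;
}

int main() {
  std::vector<int> heap(100);
  for (int i = 0; i < 100; ++i) heap[i] = i;
  return Collect(heap).size() == 50 ? 0 : 1;
}
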
@@ -12780,6 +13089,8 @@
   // Scan the heap for Script objects to find the script with the requested
   // script data.
   Handle<Script> script;
+  script_name->GetHeap()->EnsureHeapIsIterable();
+  AssertNoAllocation no_allocation_during_heap_iteration;
   HeapIterator iterator;
   HeapObject* obj = NULL;
   while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -12828,34 +13139,32 @@
                                   Object* caller,
                                   bool* seen_caller) {
   // Only display JS frames.
-  if (!raw_frame->is_java_script())
+  if (!raw_frame->is_java_script()) {
     return false;
+  }
   JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
   Object* raw_fun = frame->function();
   // Not sure when this can happen but skip it just in case.
-  if (!raw_fun->IsJSFunction())
+  if (!raw_fun->IsJSFunction()) {
     return false;
+  }
   if ((raw_fun == caller) && !(*seen_caller)) {
     *seen_caller = true;
     return false;
   }
   // Skip all frames until we've seen the caller.
   if (!(*seen_caller)) return false;
-  // Also, skip the most obvious builtin calls. We recognize builtins
-  // as (1) functions called with the builtins object as the receiver and
-  // as (2) functions from native scripts called with undefined as the
-  // receiver (direct calls to helper functions in the builtins
-  // code). Some builtin calls (such as Number.ADD which is invoked
-  // using 'call') are very difficult to recognize so we're leaving
-  // them in for now.
-  if (frame->receiver()->IsJSBuiltinsObject()) {
-    return false;
-  }
-  JSFunction* fun = JSFunction::cast(raw_fun);
-  Object* raw_script = fun->shared()->script();
-  if (frame->receiver()->IsUndefined() && raw_script->IsScript()) {
-    int script_type = Script::cast(raw_script)->type()->value();
-    return script_type != Script::TYPE_NATIVE;
+  // Also, skip non-visible built-in functions and any call with the builtins
+  // object as receiver, so as not to reveal either the builtins object or
+  // an internal function.
+  // The --builtins-in-stack-traces command line flag allows including
+  // internal call sites in the stack trace for debugging purposes.
+  if (!FLAG_builtins_in_stack_traces) {
+    JSFunction* fun = JSFunction::cast(raw_fun);
+    if (frame->receiver()->IsJSBuiltinsObject() ||
+        (fun->IsBuiltin() && !fun->shared()->native())) {
+      return false;
+    }
   }
   return true;
 }
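
The new filter reduces to a small predicate: with --builtins-in-stack-traces everything is shown; otherwise a frame is hidden when its receiver is the builtins object, or when the function is a builtin that was not marked native. Condensed (parameter names are assumptions, not V8's):

// Sketch of the visibility rule described above.
static bool ShowFrame(bool builtins_in_stack_traces,
                      bool receiver_is_builtins_object,
                      bool function_is_builtin,
                      bool function_is_native) {
  if (builtins_in_stack_traces) return true;  // debugging override
  if (receiver_is_builtins_object) return false;
  if (function_is_builtin && !function_is_native) return false;
  return true;
}
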
@@ -12991,18 +13300,20 @@
     // TODO(antonm): consider passing a receiver when constructing a cache.
     Handle<Object> receiver(isolate->global_context()->global());
     // This handle is neither shared nor used later, so it's safe.
-    Object** argv[] = { key_handle.location() };
-    bool pending_exception = false;
+    Handle<Object> argv[] = { key_handle };
+    bool pending_exception;
     value = Execution::Call(factory,
                             receiver,
-                            1,
+                            ARRAY_SIZE(argv),
                             argv,
                             &pending_exception);
     if (pending_exception) return Failure::Exception();
   }
 
 #ifdef DEBUG
-  cache_handle->JSFunctionResultCacheVerify();
+  if (FLAG_verify_heap) {
+    cache_handle->JSFunctionResultCacheVerify();
+  }
 #endif
 
   // Function invocation may have cleared the cache.  Reread all the data.
@@ -13031,7 +13342,9 @@
   cache_handle->set_finger_index(index);
 
 #ifdef DEBUG
-  cache_handle->JSFunctionResultCacheVerify();
+  if (FLAG_verify_heap) {
+    cache_handle->JSFunctionResultCacheVerify();
+  }
 #endif
 
   return *value;
@@ -13148,6 +13461,7 @@
     return isolate->heap()->ToBoolean(obj->Has##Name());  \
   }
 
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
@@ -13164,6 +13478,14 @@
 
 #undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
 
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSObject, obj1, args[0]);
+  CONVERT_CHECKED(JSObject, obj2, args[1]);
+  return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
+}
+
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
@@ -13231,6 +13553,9 @@
   Isolate* isolate = Isolate::Current();
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
+    if (isolate->heap()->new_space()->AddFreshPage()) {
+      return;
+    }
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
     isolate->heap()->CollectGarbage(failure->allocation_space());
@@ -13238,7 +13563,7 @@
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
     isolate->counters()->gc_last_resort_from_js()->Increment();
-    isolate->heap()->CollectAllGarbage(false);
+    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
   }
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index 1538b7d..b13662d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -69,7 +69,6 @@
   \
   F(GetPrototype, 1, 1) \
   F(IsInPrototypeChain, 2, 1) \
-  F(SetHiddenPrototype, 2, 1) \
   \
   F(IsConstructCall, 0, 1) \
   \
@@ -80,6 +79,7 @@
   \
   /* Utilities */ \
   F(CheckIsBootstrapping, 0, 1) \
+  F(Call, -1 /* >= 2 */, 1) \
   F(Apply, 5, 1) \
   F(GetFunctionDelegate, 1, 1) \
   F(GetConstructorDelegate, 1, 1) \
@@ -98,6 +98,7 @@
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
   F(SetNativeFlag, 1, 1) \
+  F(StoreArrayLiteralElement, 5, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -142,7 +143,7 @@
   F(StringAdd, 2, 1) \
   F(StringBuilderConcat, 3, 1) \
   F(StringBuilderJoin, 3, 1) \
-  F(SparseJoinWithSeparator, 3, 1)            \
+  F(SparseJoinWithSeparator, 3, 1) \
   \
   /* Bit operations */ \
   F(NumberOr, 2, 1) \
@@ -211,14 +212,14 @@
   /* Reflection */ \
   F(FunctionSetInstanceClassName, 2, 1) \
   F(FunctionSetLength, 2, 1) \
-  F(BoundFunctionSetLength, 2, 1)    \
   F(FunctionSetPrototype, 2, 1) \
   F(FunctionSetReadOnlyPrototype, 1, 1) \
   F(FunctionGetName, 1, 1) \
   F(FunctionSetName, 2, 1) \
   F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
   F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
-  F(FunctionSetBound, 1, 1) \
+  F(FunctionBindArguments, 4, 1) \
+  F(BoundFunctionGetBindings, 1, 1) \
   F(FunctionRemovePrototype, 1, 1) \
   F(FunctionGetSourceCode, 1, 1) \
   F(FunctionGetScript, 1, 1) \
@@ -246,7 +247,7 @@
   F(DateLocalTimezone, 1, 1) \
   F(DateLocalTimeOffset, 0, 1) \
   F(DateDaylightSavingsOffset, 1, 1) \
-  F(DateMakeDay, 3, 1) \
+  F(DateMakeDay, 2, 1) \
   F(DateYMDFromTime, 2, 1) \
   \
   /* Numbers */ \
@@ -257,8 +258,7 @@
   \
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
-  F(ResolvePossiblyDirectEval, 4, 2) \
-  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
+  F(ResolvePossiblyDirectEval, 5, 2) \
   \
   F(SetProperty, -1 /* 4 or 5 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
@@ -278,9 +278,6 @@
   \
   /* Literals */ \
   F(MaterializeRegExpLiteral, 4, 1)\
-  F(CreateArrayLiteralBoilerplate, 3, 1) \
-  F(CloneLiteralBoilerplate, 1, 1) \
-  F(CloneShallowLiteralBoilerplate, 1, 1) \
   F(CreateObjectLiteral, 4, 1) \
   F(CreateObjectLiteralShallow, 4, 1) \
   F(CreateArrayLiteral, 3, 1) \
@@ -296,6 +293,17 @@
   F(GetConstructTrap, 1, 1) \
   F(Fix, 1, 1) \
   \
+  /* Harmony sets */ \
+  F(SetInitialize, 1, 1) \
+  F(SetAdd, 2, 1) \
+  F(SetHas, 2, 1) \
+  F(SetDelete, 2, 1) \
+  \
+  /* Harmony maps */ \
+  F(MapInitialize, 1, 1) \
+  F(MapGet, 2, 1) \
+  F(MapSet, 3, 1) \
+  \
   /* Harmony weakmaps */ \
   F(WeakMapInitialize, 1, 1) \
   F(WeakMapGet, 2, 1) \
@@ -304,7 +312,7 @@
   /* Statements */ \
   F(NewClosure, 3, 1) \
   F(NewObject, 1, 1) \
-  F(NewObjectFromBound, 2, 1) \
+  F(NewObjectFromBound, 1, 1) \
   F(FinalizeInstanceSize, 1, 1) \
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
@@ -333,6 +341,7 @@
   /* Debugging */ \
   F(DebugPrint, 1, 1) \
   F(DebugTrace, 0, 1) \
+  F(TraceElementsKindTransition, 5, 1) \
   F(TraceEnter, 0, 1) \
   F(TraceExit, 1, 1) \
   F(Abort, 2, 1) \
@@ -354,6 +363,7 @@
   F(IS_VAR, 1, 1) \
   \
   /* expose boolean functions from objects-inl.h */ \
+  F(HasFastSmiOnlyElements, 1, 1) \
   F(HasFastElements, 1, 1) \
   F(HasFastDoubleElements, 1, 1) \
   F(HasDictionaryElements, 1, 1) \
@@ -367,6 +377,9 @@
   F(HasExternalUnsignedIntElements, 1, 1) \
   F(HasExternalFloatElements, 1, 1) \
   F(HasExternalDoubleElements, 1, 1) \
+  F(TransitionElementsSmiToDouble, 1, 1) \
+  F(TransitionElementsDoubleToObject, 1, 1) \
+  F(HaveSameMap, 2, 1) \
   /* profiler */ \
   F(ProfilerResume, 0, 1) \
   F(ProfilerPause, 0, 1)
@@ -492,6 +505,7 @@
   F(MathPow, 2, 1)                                                           \
   F(MathSin, 1, 1)                                                           \
   F(MathCos, 1, 1)                                                           \
+  F(MathTan, 1, 1)                                                           \
   F(MathSqrt, 1, 1)                                                          \
   F(MathLog, 1, 1)                                                           \
   F(IsRegExpEquivalent, 2, 1)                                                \
@@ -624,16 +638,14 @@
 
   static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
 
-  // TODO(1240886): The following three methods are *not* handle safe,
-  // but accept handle arguments. This seems fragile.
+  // TODO(1240886): Some of the following methods are *not* handle safe, but
+  // accept handle arguments. This seems fragile.
 
   // Support getting the characters in a string using [] notation as
   // in Firefox/SpiderMonkey, Safari and Opera.
   MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
                                                          Handle<Object> object,
                                                          uint32_t index);
-  MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
-                                                 uint32_t index);
 
   MUST_USE_RESULT static MaybeObject* SetObjectProperty(
       Isolate* isolate,
@@ -673,11 +685,9 @@
 //---------------------------------------------------------------------------
 // Constants used by interface to runtime functions.
 
-enum kDeclareGlobalsFlags {
-  kDeclareGlobalsEvalFlag = 1 << 0,
-  kDeclareGlobalsStrictModeFlag = 1 << 1,
-  kDeclareGlobalsNativeFlag = 1 << 2
-};
+class DeclareGlobalsEvalFlag:     public BitField<bool,         0, 1> {};
+class DeclareGlobalsNativeFlag:   public BitField<bool,         1, 1> {};
+class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
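
These three classes replace the old kDeclareGlobalsFlags enum with typed BitField accessors: two one-bit bools and a two-bit LanguageMode packed into a single integer. A simplified model of the encode/decode style (V8's real BitField template carries more checking; the class names below are shortened):

#include <cstdint>
#include <iostream>

enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };

// Simplified BitField: 'shift' is the field's bit offset, 'size' its width.
template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

class EvalFlag     : public BitField<bool,         0, 1> {};
class NativeFlag   : public BitField<bool,         1, 1> {};
class LanguageBits : public BitField<LanguageMode, 2, 2> {};

int main() {
  uint32_t flags = EvalFlag::encode(true) | LanguageBits::encode(STRICT_MODE);
  std::cout << EvalFlag::decode(flags) << ' '        // 1
            << NativeFlag::decode(flags) << ' '      // 0
            << LanguageBits::decode(flags) << '\n';  // 1 (STRICT_MODE)
}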
 
 } }  // namespace v8::internal
 
diff --git a/src/runtime.js b/src/runtime.js
index 14ff1b6..d0cdb3e 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -355,7 +355,7 @@
   if (!IS_SPEC_OBJECT(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
-  return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
+  return %_IsNonNegativeSmi(this) ?
     %HasElement(x, this) : %HasProperty(x, %ToString(this));
 }
 
@@ -375,6 +375,12 @@
     return 1;
   }
 
+  // Check if the function is bound; if so, get [[BoundFunction]] from it
+  // and use that instead of F.
+  var bindings = %BoundFunctionGetBindings(F);
+  if (bindings) {
+    F = bindings[kBoundFunctionIndex];  // Always a non-bound function.
+  }
   // Get the prototype of F; if it is not an object, throw an error.
   var O = F.prototype;
   if (!IS_SPEC_OBJECT(O)) {
@@ -386,13 +392,6 @@
 }
 
 
-// Get an array of property keys for the given object. Used in
-// for-in statements.
-function GET_KEYS() {
-  return %GetPropertyNames(this);
-}
-
-
 // Filter a given key against an object by checking if the object
 // has a property with the given key; return the key as a string if
 // it has. Otherwise returns 0 (smi). Used in for-in statements.
@@ -429,20 +428,10 @@
 }
 
 
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
-  var arity = %_ArgumentsLength() - 1;
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
+  var proxy = this;
   var trap = %GetConstructTrap(proxy);
-  var receiver = void 0;
-  if (!IS_UNDEFINED(trap)) {
-    trap = %GetCallTrap(proxy);
-    var proto = proxy.prototype;
-    if (!IS_SPEC_OBJECT(proto) && proto !== null) {
-      throw MakeTypeError("proto_object_or_null", [proto]);
-    }
-    receiver = new global.Object();
-    receiver.__proto__ = proto;
-  }
-  return %Apply(trap, this, arguments, 1, arity);
+  return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
 }
 
 
@@ -469,11 +458,12 @@
   }
 
   if (!IS_SPEC_FUNCTION(this)) {
-    throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
+    throw %MakeTypeError('apply_non_function',
+                         [ %ToString(this), typeof this ]);
   }
 
   // Make sure the arguments list has the right type.
-  if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
+  if (args != null && !IS_SPEC_OBJECT(args)) {
     throw %MakeTypeError('apply_wrong_args', []);
   }
 
diff --git a/src/scanner.cc b/src/scanner.cc
index 69ea8ae..01fe81c 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -36,29 +36,25 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Scanner::LiteralScope
-
-Scanner::LiteralScope::LiteralScope(Scanner* self)
-    : scanner_(self), complete_(false) {
-  self->StartLiteral();
-}
-
-
-Scanner::LiteralScope::~LiteralScope() {
-  if (!complete_) scanner_->DropLiteral();
-}
-
-
-void Scanner::LiteralScope::Complete() {
-  scanner_->TerminateLiteral();
-  complete_ = true;
-}
-
-// ----------------------------------------------------------------------------
 // Scanner
 
 Scanner::Scanner(UnicodeCache* unicode_cache)
-    : unicode_cache_(unicode_cache) { }
+    : unicode_cache_(unicode_cache),
+      octal_pos_(Location::invalid()),
+      harmony_scoping_(false) { }
+
+
+void Scanner::Initialize(UC16CharacterStream* source) {
+  source_ = source;
+  // Need to capture identifiers in order to recognize "get" and "set"
+  // in object literals.
+  Init();
+  // Skip initial whitespace allowing HTML comment ends just like
+  // after a newline and scan first token.
+  has_line_terminator_before_next_ = true;
+  SkipWhiteSpace();
+  Scan();
+}
 
 
 uc32 Scanner::ScanHexNumber(int expected_length) {
@@ -88,29 +84,6 @@
 }
 
 
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner
-
-JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
-    : Scanner(scanner_contants),
-      octal_pos_(Location::invalid()),
-      harmony_block_scoping_(false) { }
-
-
-void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
-  source_ = source;
-  // Need to capture identifiers in order to recognize "get" and "set"
-  // in object literals.
-  Init();
-  // Skip initial whitespace allowing HTML comment ends just like
-  // after a newline and scan first token.
-  has_line_terminator_before_next_ = true;
-  SkipWhiteSpace();
-  Scan();
-}
-
-
 // Ensure that tokens can be stored in a byte.
 STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
 
@@ -247,7 +220,7 @@
 };
 
 
-Token::Value JavaScriptScanner::Next() {
+Token::Value Scanner::Next() {
   current_ = next_;
   has_line_terminator_before_next_ = false;
   has_multiline_comment_before_next_ = false;
@@ -279,7 +252,7 @@
 }
 
 
-bool JavaScriptScanner::SkipWhiteSpace() {
+bool Scanner::SkipWhiteSpace() {
   int start_position = source_pos();
 
   while (true) {
@@ -319,7 +292,7 @@
 }
 
 
-Token::Value JavaScriptScanner::SkipSingleLineComment() {
+Token::Value Scanner::SkipSingleLineComment() {
   Advance();
 
   // The line terminator at the end of the line is not considered
@@ -335,7 +308,7 @@
 }
 
 
-Token::Value JavaScriptScanner::SkipMultiLineComment() {
+Token::Value Scanner::SkipMultiLineComment() {
   ASSERT(c0_ == '*');
   Advance();
 
@@ -361,7 +334,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanHtmlComment() {
+Token::Value Scanner::ScanHtmlComment() {
   // Check for <!-- comments.
   ASSERT(c0_ == '!');
   Advance();
@@ -376,7 +349,7 @@
 }
 
 
-void JavaScriptScanner::Scan() {
+void Scanner::Scan() {
   next_.literal_chars = NULL;
   Token::Value token;
   do {
@@ -616,7 +589,7 @@
 }
 
 
-void JavaScriptScanner::SeekForward(int pos) {
+void Scanner::SeekForward(int pos) {
   // After this call, we will have the token at the given position as
   // the "next" token. The "current" token will be invalid.
   if (pos == next_.location.beg_pos) return;
@@ -637,7 +610,7 @@
 }
 
 
-void JavaScriptScanner::ScanEscape() {
+void Scanner::ScanEscape() {
   uc32 c = c0_;
   Advance();
 
@@ -689,7 +662,7 @@
 
 // Octal escapes of the forms '\0xx' and '\xxx' are not a part of
 // ECMA-262. Other JS VMs support them.
-uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
   uc32 x = c - '0';
   int i = 0;
   for (; i < length; i++) {
@@ -712,7 +685,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanString() {
+Token::Value Scanner::ScanString() {
   uc32 quote = c0_;
   Advance();  // consume quote
 
@@ -736,13 +709,13 @@
 }
 
 
-void JavaScriptScanner::ScanDecimalDigits() {
+void Scanner::ScanDecimalDigits() {
   while (IsDecimalDigit(c0_))
     AddLiteralCharAdvance();
 }
 
 
-Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
+Token::Value Scanner::ScanNumber(bool seen_period) {
   ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
 
   enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
@@ -827,7 +800,7 @@
 }
 
 
-uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
+uc32 Scanner::ScanIdentifierUnicodeEscape() {
   Advance();
   if (c0_ != 'u') return -1;
   Advance();
@@ -872,7 +845,7 @@
   KEYWORD("instanceof", Token::INSTANCEOF)                          \
   KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD)          \
   KEYWORD_GROUP('l')                                                \
-  KEYWORD("let", harmony_block_scoping                              \
+  KEYWORD("let", harmony_scoping                                    \
                  ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
   KEYWORD_GROUP('n')                                                \
   KEYWORD("new", Token::NEW)                                        \
@@ -906,7 +879,7 @@
 
 static Token::Value KeywordOrIdentifierToken(const char* input,
                                              int input_length,
-                                             bool harmony_block_scoping) {
+                                             bool harmony_scoping) {
   ASSERT(input_length >= 1);
   const int kMinLength = 2;
   const int kMaxLength = 10;
@@ -944,7 +917,7 @@
 }
 
 
-Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
+Token::Value Scanner::ScanIdentifierOrKeyword() {
   ASSERT(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
   // Scan identifier start character.
@@ -982,14 +955,14 @@
     Vector<const char> chars = next_.literal_chars->ascii_literal();
     return KeywordOrIdentifierToken(chars.start(),
                                     chars.length(),
-                                    harmony_block_scoping_);
+                                    harmony_scoping_);
   }
 
   return Token::IDENTIFIER;
 }
 
 
-Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
   // Scan the rest of the identifier characters.
   while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
@@ -1012,7 +985,7 @@
 }
 
 
-bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
+bool Scanner::ScanRegExpPattern(bool seen_equal) {
   // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
   bool in_character_class = false;
 
@@ -1059,7 +1032,7 @@
 }
 
 
-bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
+bool Scanner::ScanLiteralUnicodeEscape() {
   ASSERT(c0_ == '\\');
   uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
   Advance();
@@ -1089,7 +1062,7 @@
 }
 
 
-bool JavaScriptScanner::ScanRegExpFlags() {
+bool Scanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
   while (unicode_cache_->IsIdentifierPart(c0_)) {
diff --git a/src/scanner.h b/src/scanner.h
index 16c3a42..c512ec3 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -41,6 +41,25 @@
 namespace v8 {
 namespace internal {
 
+
+// General collection of (multi-)bit-flags that can be passed to scanners and
+// parsers to signify their (initial) mode of operation.
+enum ParsingFlags {
+  kNoParsingFlags = 0,
+  // Embed LanguageMode values in parsing flags, i.e., equivalent to:
+  // CLASSIC_MODE = 0,
+  // STRICT_MODE,
+  // EXTENDED_MODE,
+  kLanguageModeMask = 0x03,
+  kAllowLazy = 4,
+  kAllowNativesSyntax = 8
+};
+
+STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
+STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
+STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
+
+
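
Because the LanguageMode values occupy the two low bits (which the STATIC_ASSERTs above pin down), one flags word can carry the mode plus independent option bits, and the mode is recovered with a single mask. A self-contained illustration using the values declared above:

#include <iostream>

enum LanguageMode { CLASSIC_MODE = 0, STRICT_MODE = 1, EXTENDED_MODE = 2 };
enum ParsingFlags {
  kNoParsingFlags = 0,
  kLanguageModeMask = 0x03,  // low two bits hold the LanguageMode
  kAllowLazy = 4,
  kAllowNativesSyntax = 8
};

int main() {
  int flags = kAllowLazy | STRICT_MODE;  // mode rides in the low bits
  LanguageMode mode =
      static_cast<LanguageMode>(flags & kLanguageModeMask);
  bool lazy = (flags & kAllowLazy) != 0;
  std::cout << mode << ' ' << lazy << '\n';  // 1 1
}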
 // Returns the value (0 .. 15) of a hexadecimal character c.
 // If c is not a legal hexadecimal character, returns a value < 0.
 inline int HexValue(uc32 c) {
@@ -158,7 +177,7 @@
     }
   }
 
-  inline void AddChar(uc16 character) {
+  INLINE(void AddChar(uc16 character)) {
     if (position_ >= backing_store_.length()) ExpandBuffer();
     if (is_ascii_) {
       if (character < kMaxAsciiCharCodeU) {
@@ -249,35 +268,32 @@
 
 
 // ----------------------------------------------------------------------------
-// Scanner base-class.
+// JavaScript Scanner.
 
-// Generic functionality used by both JSON and JavaScript scanners.
 class Scanner {
  public:
-  // -1 is outside of the range of any real source code.
-  static const int kNoOctalLocation = -1;
-
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
+  // Scoped helper for literal recording. Automatically drops the literal
+  // if the scan is aborted before the literal is complete.
   class LiteralScope {
    public:
-    explicit LiteralScope(Scanner* self);
-    ~LiteralScope();
-    void Complete();
+    explicit LiteralScope(Scanner* self)
+        : scanner_(self), complete_(false) {
+      scanner_->StartLiteral();
+    }
+    ~LiteralScope() {
+      if (!complete_) scanner_->DropLiteral();
+    }
+    void Complete() {
+      scanner_->TerminateLiteral();
+      complete_ = true;
+    }
 
    private:
     Scanner* scanner_;
     bool complete_;
   };
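
LiteralScope is a plain RAII guard: the constructor opens a literal, and unless Complete() runs, the destructor drops it, so every early return in the scanning methods discards half-recorded literals automatically. The same shape against a toy recorder (illustrative, not the scanner's real buffer API):

#include <iostream>
#include <string>
#include <vector>

// Toy recorder standing in for the scanner's literal buffer.
struct Recorder {
  std::vector<std::string> kept;
  std::string pending;
  void StartLiteral() { pending.clear(); }
  void TerminateLiteral() { kept.push_back(pending); }
  void DropLiteral() { pending.clear(); }
};

class LiteralScope {
 public:
  explicit LiteralScope(Recorder* r) : recorder_(r), complete_(false) {
    recorder_->StartLiteral();
  }
  ~LiteralScope() {
    if (!complete_) recorder_->DropLiteral();  // rollback on early exit
  }
  void Complete() {
    recorder_->TerminateLiteral();
    complete_ = true;
  }

 private:
  Recorder* recorder_;
  bool complete_;
};

int main() {
  Recorder rec;
  {
    LiteralScope literal(&rec);
    rec.pending = "ident";
    literal.Complete();  // kept
  }
  {
    LiteralScope literal(&rec);
    rec.pending = "abandoned";  // no Complete(): destructor drops it
  }
  std::cout << rec.kept.size() << '\n';  // 1
}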
 
-  explicit Scanner(UnicodeCache* scanner_contants);
-
-  // Returns the current token again.
-  Token::Value current_token() { return current_.token; }
-
-  // One token look-ahead (past the token returned by Next()).
-  Token::Value peek() const { return next_.token; }
-
+  // Representation of an interval of source positions.
   struct Location {
     Location(int b, int e) : beg_pos(b), end_pos(e) { }
     Location() : beg_pos(0), end_pos(0) { }
@@ -292,21 +308,28 @@
     int end_pos;
   };
 
-  // Returns the location information for the current token
-  // (the token returned by Next()).
-  Location location() const { return current_.location; }
-  Location peek_location() const { return next_.location; }
+  // -1 is outside of the range of any real source code.
+  static const int kNoOctalLocation = -1;
 
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
+  explicit Scanner(UnicodeCache* scanner_contants);
+
+  void Initialize(UC16CharacterStream* source);
+
+  // Returns the next token and advances input.
+  Token::Value Next();
+  // Returns the current token again.
+  Token::Value current_token() { return current_.token; }
+  // Returns the location information for the current token
+  // (the token last returned by Next()).
+  Location location() const { return current_.location; }
   // Returns the literal string, if any, for the current token (the
-  // token returned by Next()). The string is 0-terminated and in
-  // UTF-8 format; they may contain 0-characters. Literal strings are
-  // collected for identifiers, strings, and numbers.
+  // token last returned by Next()). The string is 0-terminated.
+  // Literal strings are collected for identifiers, strings, and
+  // numbers.
   // These functions only give the correct result if the literal
   // was scanned between calls to StartLiteral() and TerminateLiteral().
-  bool is_literal_ascii() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->is_ascii();
-  }
   Vector<const char> literal_ascii_string() {
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->ascii_literal();
@@ -315,6 +338,10 @@
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->uc16_literal();
   }
+  bool is_literal_ascii() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->is_ascii();
+  }
   int literal_length() const {
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->length();
@@ -330,12 +357,15 @@
     return current_.literal_chars->length() != source_length;
   }
 
+  // Similar functions for the upcoming token.
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const { return next_.token; }
+
+  Location peek_location() const { return next_.location; }
+
   // Returns the literal string for the next token (the token that
   // would be returned if Next() were called).
-  bool is_next_literal_ascii() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->is_ascii();
-  }
   Vector<const char> next_literal_ascii_string() {
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->ascii_literal();
@@ -344,6 +374,10 @@
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->uc16_literal();
   }
+  bool is_next_literal_ascii() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->is_ascii();
+  }
   int next_literal_length() const {
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->length();
@@ -353,7 +387,46 @@
 
   static const int kCharacterLookaheadBufferSize = 1;
 
- protected:
+  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+  uc32 ScanOctalEscape(uc32 c, int length);
+
+  // Returns the location of the last seen octal literal.
+  Location octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+  // Seek forward to the given position.  This operation does not
+  // work in general, for instance when there are pushed back
+  // characters, but works for seeking forward until simple delimiter
+  // tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  bool HarmonyScoping() const {
+    return harmony_scoping_;
+  }
+  void SetHarmonyScoping(bool block_scoping) {
+    harmony_scoping_ = block_scoping;
+  }
+
+
+  // Returns true if there was a line terminator before the peek'ed token,
+  // possibly inside a multi-line comment.
+  bool HasAnyLineTerminatorBeforeNext() const {
+    return has_line_terminator_before_next_ ||
+           has_multiline_comment_before_next_;
+  }
+
+  // Scans the input as a regular expression pattern. The previous
+  // character(s) must be /(=). Returns true if a pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+ private:
   // The current and look-ahead token.
   struct TokenDesc {
     Token::Value token;
@@ -378,7 +451,7 @@
     next_.literal_chars = free_buffer;
   }
 
-  inline void AddLiteralChar(uc32 c) {
+  INLINE(void AddLiteralChar(uc32 c)) {
     ASSERT_NOT_NULL(next_.literal_chars);
     next_.literal_chars->AddChar(c);
   }
@@ -423,6 +496,31 @@
 
   uc32 ScanHexNumber(int expected_length);
 
+  // Scans a single JavaScript token.
+  void Scan();
+
+  bool SkipWhiteSpace();
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
+
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifierOrKeyword();
+  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
+
+  void ScanEscape();
+  Token::Value ScanString();
+
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+  // Recognizes a unicode escape-sequence and adds its characters,
+  // uninterpreted, to the current literal. Used for parsing RegExp
+  // flags.
+  bool ScanLiteralUnicodeEscape();
+
   // Return the current source position.
   int source_pos() {
     return source_->pos() - kCharacterLookaheadBufferSize;
@@ -440,113 +538,13 @@
   // Input stream. Must be initialized to an UC16CharacterStream.
   UC16CharacterStream* source_;
 
-  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
-  uc32 c0_;
-};
-
-// ----------------------------------------------------------------------------
-// JavaScriptScanner - base logic for JavaScript scanning.
-
-class JavaScriptScanner : public Scanner {
- public:
-  // A LiteralScope that disables recording of some types of JavaScript
-  // literals. If the scanner is configured to not record the specific
-  // type of literal, the scope will not call StartLiteral.
-  class LiteralScope {
-   public:
-    explicit LiteralScope(JavaScriptScanner* self)
-        : scanner_(self), complete_(false) {
-      scanner_->StartLiteral();
-    }
-     ~LiteralScope() {
-       if (!complete_) scanner_->DropLiteral();
-     }
-    void Complete() {
-      scanner_->TerminateLiteral();
-      complete_ = true;
-    }
-
-   private:
-    JavaScriptScanner* scanner_;
-    bool complete_;
-  };
-
-  explicit JavaScriptScanner(UnicodeCache* scanner_contants);
-
-  void Initialize(UC16CharacterStream* source);
-
-  // Returns the next token.
-  Token::Value Next();
-
-  // Returns true if there was a line terminator before the peek'ed token,
-  // possibly inside a multi-line comment.
-  bool HasAnyLineTerminatorBeforeNext() const {
-    return has_line_terminator_before_next_ ||
-           has_multiline_comment_before_next_;
-  }
-
-  // Scans the input as a regular expression pattern, previous
-  // character(s) must be /(=). Returns true if a pattern is scanned.
-  bool ScanRegExpPattern(bool seen_equal);
-  // Returns true if regexp flags are scanned (always since flags can
-  // be empty).
-  bool ScanRegExpFlags();
-
-  // Tells whether the buffer contains an identifier (no escapes).
-  // Used for checking if a property name is an identifier.
-  static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
-  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
-  uc32 ScanOctalEscape(uc32 c, int length);
-
-  // Returns the location of the last seen octal literal
-  Location octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
-  // Seek forward to the given position.  This operation does not
-  // work in general, for instance when there are pushed back
-  // characters, but works for seeking forward until simple delimiter
-  // tokens, which is what it is used for.
-  void SeekForward(int pos);
-
-  bool HarmonyBlockScoping() const {
-    return harmony_block_scoping_;
-  }
-  void SetHarmonyBlockScoping(bool block_scoping) {
-    harmony_block_scoping_ = block_scoping;
-  }
-
-
- protected:
-  bool SkipWhiteSpace();
-  Token::Value SkipSingleLineComment();
-  Token::Value SkipMultiLineComment();
-
-  // Scans a single JavaScript token.
-  void Scan();
-
-  void ScanDecimalDigits();
-  Token::Value ScanNumber(bool seen_period);
-  Token::Value ScanIdentifierOrKeyword();
-  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
-  void ScanEscape();
-  Token::Value ScanString();
-
-  // Scans a possible HTML comment -- begins with '<!'.
-  Token::Value ScanHtmlComment();
-
-  // Decodes a unicode escape-sequence which is part of an identifier.
-  // If the escape sequence cannot be decoded the result is kBadChar.
-  uc32 ScanIdentifierUnicodeEscape();
-  // Recognizes a uniocde escape-sequence and adds its characters,
-  // uninterpreted, to the current literal. Used for parsing RegExp
-  // flags.
-  bool ScanLiteralUnicodeEscape();
 
   // Start position of the octal literal last scanned.
   Location octal_pos_;
 
+  // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+  uc32 c0_;
+
   // Whether there is a line terminator whitespace character after
   // the current token, and before the next. Does not count newlines
   // inside multiline comments.
@@ -556,7 +554,7 @@
   bool has_multiline_comment_before_next_;
   // Whether we scan 'let' as a keyword for harmony block scoped
   // let bindings.
-  bool harmony_block_scoping_;
+  bool harmony_scoping_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index ad31ca4..0f36234 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -38,456 +38,297 @@
 namespace internal {
 
 
-static int CompareLocal(Variable* const* v, Variable* const* w) {
-  int x = (*v)->index();
-  int y = (*w)->index();
-  // Consider sorting them according to type as well?
-  return x - y;
-}
+Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
+  // Collect stack and context locals.
+  ZoneList<Variable*> stack_locals(scope->StackLocalCount());
+  ZoneList<Variable*> context_locals(scope->ContextLocalCount());
+  scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
+  const int stack_local_count = stack_locals.length();
+  const int context_local_count = context_locals.length();
+  // Make sure we allocate the correct amount.
+  ASSERT(scope->StackLocalCount() == stack_local_count);
+  ASSERT(scope->ContextLocalCount() == context_local_count);
 
+  // Determine use and location of the function variable if it is present.
+  FunctionVariableInfo function_name_info;
+  VariableMode function_variable_mode;
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    Variable* var = scope->function()->var();
+    if (!var->is_used()) {
+      function_name_info = UNUSED;
+    } else if (var->IsContextSlot()) {
+      function_name_info = CONTEXT;
+    } else {
+      ASSERT(var->IsStackLocal());
+      function_name_info = STACK;
+    }
+    function_variable_mode = var->mode();
+  } else {
+    function_name_info = NONE;
+    function_variable_mode = VAR;
+  }
 
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
-    : function_name_(FACTORY->empty_symbol()),
-      calls_eval_(scope->calls_eval()),
-      is_strict_mode_(scope->is_strict_mode()),
-      parameters_(scope->num_parameters()),
-      stack_slots_(scope->num_stack_slots()),
-      context_slots_(scope->num_heap_slots()),
-      context_modes_(scope->num_heap_slots()) {
+  const bool has_function_name = function_name_info != NONE;
+  const int parameter_count = scope->num_parameters();
+  const int length = kVariablePartIndex
+      + parameter_count + stack_local_count + 2 * context_local_count
+      + (has_function_name ? 2 : 0);
+
+  Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
+
+  // Encode the flags.
+  int flags = TypeField::encode(scope->type()) |
+      CallsEvalField::encode(scope->calls_eval()) |
+      LanguageModeField::encode(scope->language_mode()) |
+      FunctionVariableField::encode(function_name_info) |
+      FunctionVariableMode::encode(function_variable_mode);
+  scope_info->SetFlags(flags);
+  scope_info->SetParameterCount(parameter_count);
+  scope_info->SetStackLocalCount(stack_local_count);
+  scope_info->SetContextLocalCount(context_local_count);
+
+  int index = kVariablePartIndex;
   // Add parameters.
-  for (int i = 0; i < scope->num_parameters(); i++) {
-    ASSERT(parameters_.length() == i);
-    parameters_.Add(scope->parameter(i)->name());
+  ASSERT(index == scope_info->ParameterEntriesIndex());
+  for (int i = 0; i < parameter_count; ++i) {
+    scope_info->set(index++, *scope->parameter(i)->name());
   }
 
-  // Add stack locals and collect heap locals.
-  // We are assuming that the locals' slots are allocated in
-  // increasing order, so we can simply add them to the
-  // ScopeInfo lists. However, due to usage analysis, this is
-  // not true for context-allocated locals: Some of them
-  // may be parameters which are allocated before the
-  // non-parameter locals. When the non-parameter locals are
-  // sorted according to usage, the allocated slot indices may
-  // not be in increasing order with the variable list anymore.
-  // Thus, we first collect the context-allocated locals, and then
-  // sort them by context slot index before adding them to the
-  // ScopeInfo list.
-  List<Variable*, Allocator> locals(32);  // 32 is a wild guess
-  ASSERT(locals.is_empty());
-  scope->CollectUsedVariables(&locals);
-  locals.Sort(&CompareLocal);
-
-  List<Variable*, Allocator> heap_locals(locals.length());
-  for (int i = 0; i < locals.length(); i++) {
-    Variable* var = locals[i];
-    if (var->is_used()) {
-      switch (var->location()) {
-        case Variable::UNALLOCATED:
-        case Variable::PARAMETER:
-          break;
-
-        case Variable::LOCAL:
-          ASSERT(stack_slots_.length() == var->index());
-          stack_slots_.Add(var->name());
-          break;
-
-        case Variable::CONTEXT:
-          heap_locals.Add(var);
-          break;
-
-        case Variable::LOOKUP:
-          // We don't expect lookup variables in the locals list.
-          UNREACHABLE();
-          break;
-      }
-    }
+  // Add stack locals' names. We are assuming that the stack locals'
+  // slots are allocated in increasing order, so we can simply add
+  // them to the ScopeInfo object.
+  ASSERT(index == scope_info->StackLocalEntriesIndex());
+  for (int i = 0; i < stack_local_count; ++i) {
+    ASSERT(stack_locals[i]->index() == i);
+    scope_info->set(index++, *stack_locals[i]->name());
   }
 
-  // Add heap locals.
-  if (scope->num_heap_slots() > 0) {
-    // Add user-defined slots.
-    for (int i = 0; i < heap_locals.length(); i++) {
-      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_slots_.length());
-      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_modes_.length());
-      context_slots_.Add(heap_locals[i]->name());
-      context_modes_.Add(heap_locals[i]->mode());
-    }
+  // Due to usage analysis, context-allocated locals are not necessarily in
+  // increasing order: Some of them may be parameters which are allocated before
+  // the non-parameter locals. When the non-parameter locals are sorted
+  // according to usage, the allocated slot indices may not be in increasing
+  // order with the variable list anymore. Thus, we first need to sort them by
+  // context slot index before adding them to the ScopeInfo object.
+  context_locals.Sort(&Variable::CompareIndex);
 
-  } else {
-    ASSERT(heap_locals.length() == 0);
+  // Add context locals' names.
+  ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
+  for (int i = 0; i < context_local_count; ++i) {
+    scope_info->set(index++, *context_locals[i]->name());
   }
 
-  // Add the function context slot, if present.
-  // For now, this must happen at the very end because of the
-  // ordering of the scope info slots and the respective slot indices.
-  if (scope->is_function_scope()) {
-    VariableProxy* proxy = scope->function();
-    if (proxy != NULL &&
-        proxy->var()->is_used() &&
-        proxy->var()->IsContextSlot()) {
-      function_name_ = proxy->name();
-      // Note that we must not find the function name in the context slot
-      // list - instead it must be handled separately in the
-      // Contexts::Lookup() function. Thus record an empty symbol here so we
-      // get the correct number of context slots.
-      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_slots_.length());
-      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
-             context_modes_.length());
-      context_slots_.Add(FACTORY->empty_symbol());
-      context_modes_.Add(Variable::INTERNAL);
-    }
+  // Add context locals' info.
+  ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
+  for (int i = 0; i < context_local_count; ++i) {
+    Variable* var = context_locals[i];
+    uint32_t value = ContextLocalMode::encode(var->mode()) |
+        ContextLocalInitFlag::encode(var->initialization_flag());
+    scope_info->set(index++, Smi::FromInt(value));
   }
-}
 
-
-// Encoding format in a FixedArray object:
-//
-// - function name
-//
-// - calls eval boolean flag
-//
-// - number of variables in the context object (smi) (= function context
-//   slot index + 1)
-// - list of pairs (name, Var mode) of context-allocated variables (starting
-//   with context slot 0)
-//
-// - number of parameters (smi)
-// - list of parameter names (starting with parameter 0 first)
-//
-// - number of variables on the stack (smi)
-// - list of names of stack-allocated variables (starting with stack slot 0)
-
-// The ScopeInfo representation could be simplified and the ScopeInfo
-// re-implemented (with almost the same interface). Here is a
-// suggestion for the new format:
-//
-// - have a single list with all variable names (parameters, stack locals,
-//   context locals), followed by a list of non-Object* values containing
-//   the variables information (what kind, index, attributes)
-// - searching the linear list of names is fast and yields an index into the
-//   list if the variable name is found
-// - that list index is then used to find the variable information in the
-//   subsequent list
-// - the list entries don't have to be in any particular order, so all the
-//   current sorting business can go away
-// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
-//   which returns all information at once
-// - when gathering the information from a Scope, we only need to iterate
-//   through the local variables (parameters and context info is already
-//   present)
-
-
-static inline Object** ReadInt(Object** p, int* x) {
-  *x = (reinterpret_cast<Smi*>(*p++))->value();
-  return p;
-}
-
-
-static inline Object** ReadBool(Object** p, bool* x) {
-  *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
-  return p;
-}
-
-
-static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
-  *s = Handle<String>(reinterpret_cast<String*>(*p++));
-  return p;
-}
-
-
-template <class Allocator>
-static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
-  ASSERT(list->is_empty());
-  int n;
-  p = ReadInt(p, &n);
-  while (n-- > 0) {
-    Handle<String> s;
-    p = ReadSymbol(p, &s);
-    list->Add(s);
+  // If present, add the function variable name and its index.
+  ASSERT(index == scope_info->FunctionNameEntryIndex());
+  if (has_function_name) {
+    int var_index = scope->function()->var()->index();
+    scope_info->set(index++, *scope->function()->name());
+    scope_info->set(index++, Smi::FromInt(var_index));
+    ASSERT(function_name_info != STACK ||
+           (var_index == scope_info->StackLocalCount() &&
+            var_index == scope_info->StackSlotCount() - 1));
+    ASSERT(function_name_info != CONTEXT ||
+           var_index == scope_info->ContextLength() - 1);
   }
-  return p;
+
+  ASSERT(index == scope_info->length());
+  ASSERT(scope->num_parameters() == scope_info->ParameterCount());
+  ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
+  ASSERT(scope->num_heap_slots() == scope_info->ContextLength());
+  return scope_info;
 }
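
The resulting object is one flat array: a fixed header (flags plus the three counts) followed by parameter names, stack-local names, context-local names, context-local info Smis, and optionally the function name and its slot index. A standalone sketch of the index arithmetic, assuming kVariablePartIndex is 4 (one header slot each for flags and the three counts):

    #include <cassert>

    // Mirrors the entry-index helpers added at the bottom of scopeinfo.cc,
    // over plain ints instead of a FixedArray. kVariablePartIndex = 4 is an
    // assumption about the header size.
    struct Layout {
      int parameter_count, stack_local_count, context_local_count;
      bool has_function_name;
      static const int kVariablePartIndex = 4;

      int ParameterEntriesIndex() const { return kVariablePartIndex; }
      int StackLocalEntriesIndex() const {
        return ParameterEntriesIndex() + parameter_count;
      }
      int ContextLocalNameEntriesIndex() const {
        return StackLocalEntriesIndex() + stack_local_count;
      }
      int ContextLocalInfoEntriesIndex() const {
        return ContextLocalNameEntriesIndex() + context_local_count;
      }
      int FunctionNameEntryIndex() const {
        return ContextLocalInfoEntriesIndex() + context_local_count;
      }
      int length() const {  // same formula as in ScopeInfo::Create
        return kVariablePartIndex + parameter_count + stack_local_count +
               2 * context_local_count + (has_function_name ? 2 : 0);
      }
    };

    int main() {
      Layout l = { 2, 3, 2, true };  // 2 params, 3 stack, 2 context locals
      assert(l.FunctionNameEntryIndex() + 2 == l.length());  // name + index pair
      return 0;
    }
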
 
 
-template <class Allocator>
-static Object** ReadList(Object** p,
-                         List<Handle<String>, Allocator>* list,
-                         List<Variable::Mode, Allocator>* modes) {
-  ASSERT(list->is_empty());
-  int n;
-  p = ReadInt(p, &n);
-  while (n-- > 0) {
-    Handle<String> s;
-    int m;
-    p = ReadSymbol(p, &s);
-    p = ReadInt(p, &m);
-    list->Add(s);
-    modes->Add(static_cast<Variable::Mode>(m));
-  }
-  return p;
+ScopeInfo* ScopeInfo::Empty() {
+  return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
 }
 
 
-template<class Allocator>
-ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
-  : function_name_(FACTORY->empty_symbol()),
-    parameters_(4),
-    stack_slots_(8),
-    context_slots_(8),
-    context_modes_(8) {
-  if (data->length() > 0) {
-    Object** p0 = data->data_start();
-    Object** p = p0;
-    p = ReadSymbol(p, &function_name_);
-    p = ReadBool(p, &calls_eval_);
-    p = ReadBool(p, &is_strict_mode_);
-    p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
-    p = ReadList<Allocator>(p, &parameters_);
-    p = ReadList<Allocator>(p, &stack_slots_);
-    ASSERT((p - p0) == FixedArray::cast(data)->length());
-  }
-}
-
-
-static inline Object** WriteInt(Object** p, int x) {
-  *p++ = Smi::FromInt(x);
-  return p;
-}
-
-
-static inline Object** WriteBool(Object** p, bool b) {
-  *p++ = Smi::FromInt(b ? 1 : 0);
-  return p;
-}
-
-
-static inline Object** WriteSymbol(Object** p, Handle<String> s) {
-  *p++ = *s;
-  return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
-  const int n = list->length();
-  p = WriteInt(p, n);
-  for (int i = 0; i < n; i++) {
-    p = WriteSymbol(p, list->at(i));
-  }
-  return p;
-}
-
-
-template <class Allocator>
-static Object** WriteList(Object** p,
-                          List<Handle<String>, Allocator>* list,
-                          List<Variable::Mode, Allocator>* modes) {
-  const int n = list->length();
-  p = WriteInt(p, n);
-  for (int i = 0; i < n; i++) {
-    p = WriteSymbol(p, list->at(i));
-    p = WriteInt(p, modes->at(i));
-  }
-  return p;
-}
-
-
-template<class Allocator>
-Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
-  // function name, calls eval, is_strict_mode, length for 3 tables:
-  const int extra_slots = 1 + 1 + 1 + 3;
-  int length = extra_slots +
-               context_slots_.length() * 2 +
-               parameters_.length() +
-               stack_slots_.length();
-
-  Handle<SerializedScopeInfo> data(
-      SerializedScopeInfo::cast(*FACTORY->NewSerializedScopeInfo(length)));
-  AssertNoAllocation nogc;
-
-  Object** p0 = data->data_start();
-  Object** p = p0;
-  p = WriteSymbol(p, function_name_);
-  p = WriteBool(p, calls_eval_);
-  p = WriteBool(p, is_strict_mode_);
-  p = WriteList(p, &context_slots_, &context_modes_);
-  p = WriteList(p, &parameters_);
-  p = WriteList(p, &stack_slots_);
-  ASSERT((p - p0) == length);
-
-  return data;
-}
-
-
-template<class Allocator>
-Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
-  // A local variable can be allocated either on the stack or in the context.
-  // For variables allocated in the context they are always preceded by
-  // Context::MIN_CONTEXT_SLOTS of fixed allocated slots in the context.
-  if (i < number_of_stack_slots()) {
-    return stack_slot_name(i);
-  } else {
-    return context_slot_name(i - number_of_stack_slots() +
-                             Context::MIN_CONTEXT_SLOTS);
-  }
-}
-
-
-template<class Allocator>
-int ScopeInfo<Allocator>::NumberOfLocals() const {
-  int number_of_locals = number_of_stack_slots();
-  if (number_of_context_slots() > 0) {
-    ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
-    number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
-  }
-  return number_of_locals;
-}
-
-
-Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
-  ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
-  return sinfo.Serialize();
-}
-
-
-SerializedScopeInfo* SerializedScopeInfo::Empty() {
-  return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
-}
-
-
-Object** SerializedScopeInfo::ContextEntriesAddr() {
+ScopeType ScopeInfo::Type() {
   ASSERT(length() > 0);
-  // +3 for function name, calls eval, strict mode.
-  return data_start() + 3;
+  return TypeField::decode(Flags());
 }
 
 
-Object** SerializedScopeInfo::ParameterEntriesAddr() {
-  ASSERT(length() > 0);
-  Object** p = ContextEntriesAddr();
-  int number_of_context_slots;
-  p = ReadInt(p, &number_of_context_slots);
-  return p + number_of_context_slots*2;  // *2 for pairs
+bool ScopeInfo::CallsEval() {
+  return length() > 0 && CallsEvalField::decode(Flags());
 }
 
 
-Object** SerializedScopeInfo::StackSlotEntriesAddr() {
-  ASSERT(length() > 0);
-  Object** p = ParameterEntriesAddr();
-  int number_of_parameter_slots;
-  p = ReadInt(p, &number_of_parameter_slots);
-  return p + number_of_parameter_slots;
+LanguageMode ScopeInfo::language_mode() {
+  return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
 }
 
 
-bool SerializedScopeInfo::CallsEval() {
+int ScopeInfo::LocalCount() {
+  return StackLocalCount() + ContextLocalCount();
+}
+
+
+int ScopeInfo::StackSlotCount() {
   if (length() > 0) {
-    Object** p = data_start() + 1;  // +1 for function name.
-    bool calls_eval;
-    p = ReadBool(p, &calls_eval);
-    return calls_eval;
-  }
-  return false;
-}
-
-
-bool SerializedScopeInfo::IsStrictMode() {
-  if (length() > 0) {
-    Object** p = data_start() + 2;  // +2 for function name, calls eval.
-    bool strict_mode;
-    p = ReadBool(p, &strict_mode);
-    return strict_mode;
-  }
-  return false;
-}
-
-
-int SerializedScopeInfo::NumberOfStackSlots() {
-  if (length() > 0) {
-    Object** p = StackSlotEntriesAddr();
-    int number_of_stack_slots;
-    ReadInt(p, &number_of_stack_slots);
-    return number_of_stack_slots;
+    bool function_name_stack_slot =
+        FunctionVariableField::decode(Flags()) == STACK;
+    return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
   }
   return 0;
 }
 
 
-int SerializedScopeInfo::NumberOfContextSlots() {
+int ScopeInfo::ContextLength() {
   if (length() > 0) {
-    Object** p = ContextEntriesAddr();
-    int number_of_context_slots;
-    ReadInt(p, &number_of_context_slots);
-    return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
+    int context_locals = ContextLocalCount();
+    bool function_name_context_slot =
+        FunctionVariableField::decode(Flags()) == CONTEXT;
+    bool has_context = context_locals > 0 ||
+        function_name_context_slot ||
+        Type() == WITH_SCOPE ||
+        (Type() == FUNCTION_SCOPE && CallsEval());
+    if (has_context) {
+      return Context::MIN_CONTEXT_SLOTS + context_locals +
+          (function_name_context_slot ? 1 : 0);
+    }
   }
   return 0;
 }
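
A scope only gets a materialized context when something forces one; the length then includes the fixed header slots. A worked standalone sketch, assuming Context::MIN_CONTEXT_SLOTS is 4 (its value in this vintage of V8):

    // Mirrors ScopeInfo::ContextLength() over plain flags.
    int ContextLength(int context_local_count, bool function_in_context,
                      bool is_with_scope, bool function_scope_calls_eval) {
      bool has_context = context_local_count > 0 || function_in_context ||
                         is_with_scope || function_scope_calls_eval;
      if (!has_context) return 0;
      return 4 /* MIN_CONTEXT_SLOTS, assumed */ + context_local_count +
             (function_in_context ? 1 : 0);
    }
    // Example: two context locals plus a context-allocated function name
    // yields 4 + 2 + 1 = 7 slots.
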
 
 
-bool SerializedScopeInfo::HasHeapAllocatedLocals() {
+bool ScopeInfo::HasFunctionName() {
   if (length() > 0) {
-    Object** p = ContextEntriesAddr();
-    int number_of_context_slots;
-    ReadInt(p, &number_of_context_slots);
-    return number_of_context_slots > 0;
+    return NONE != FunctionVariableField::decode(Flags());
+  } else {
+    return false;
   }
-  return false;
 }
 
 
-int SerializedScopeInfo::StackSlotIndex(String* name) {
+bool ScopeInfo::HasHeapAllocatedLocals() {
+  if (length() > 0) {
+    return ContextLocalCount() > 0;
+  } else {
+    return false;
+  }
+}
+
+
+bool ScopeInfo::HasContext() {
+  if (length() > 0) {
+    return ContextLength() > 0;
+  } else {
+    return false;
+  }
+}
+
+
+String* ScopeInfo::FunctionName() {
+  ASSERT(HasFunctionName());
+  return String::cast(get(FunctionNameEntryIndex()));
+}
+
+
+String* ScopeInfo::ParameterName(int var) {
+  ASSERT(0 <= var && var < ParameterCount());
+  int info_index = ParameterEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::LocalName(int var) {
+  ASSERT(0 <= var && var < LocalCount());
+  ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
+         ContextLocalNameEntriesIndex());
+  int info_index = StackLocalEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::StackLocalName(int var) {
+  ASSERT(0 <= var && var < StackLocalCount());
+  int info_index = StackLocalEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+String* ScopeInfo::ContextLocalName(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalNameEntriesIndex() + var;
+  return String::cast(get(info_index));
+}
+
+
+VariableMode ScopeInfo::ContextLocalMode(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalMode::decode(value);
+}
+
+
+InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
+  ASSERT(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalInitFlag::decode(value);
+}
+
+
+int ScopeInfo::StackSlotIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
-    // Slots start after length entry.
-    Object** p0 = StackSlotEntriesAddr();
-    int number_of_stack_slots;
-    p0 = ReadInt(p0, &number_of_stack_slots);
-    Object** p = p0;
-    Object** end = p0 + number_of_stack_slots;
-    while (p != end) {
-      if (*p == name) return static_cast<int>(p - p0);
-      p++;
+    int start = StackLocalEntriesIndex();
+    int end = StackLocalEntriesIndex() + StackLocalCount();
+    for (int i = start; i < end; ++i) {
+      if (name == get(i)) {
+        return i - start;
+      }
     }
   }
   return -1;
 }
 
-int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
+
+int ScopeInfo::ContextSlotIndex(String* name,
+                                VariableMode* mode,
+                                InitializationFlag* init_flag) {
   ASSERT(name->IsSymbol());
-  Isolate* isolate = GetIsolate();
-  int result = isolate->context_slot_cache()->Lookup(this, name, mode);
-  if (result != ContextSlotCache::kNotFound) return result;
+  ASSERT(mode != NULL);
+  ASSERT(init_flag != NULL);
   if (length() > 0) {
-    // Slots start after length entry.
-    Object** p0 = ContextEntriesAddr();
-    int number_of_context_slots;
-    p0 = ReadInt(p0, &number_of_context_slots);
-    Object** p = p0;
-    Object** end = p0 + number_of_context_slots * 2;
-    while (p != end) {
-      if (*p == name) {
-        ASSERT(((p - p0) & 1) == 0);
-        int v;
-        ReadInt(p + 1, &v);
-        Variable::Mode mode_value = static_cast<Variable::Mode>(v);
-        if (mode != NULL) *mode = mode_value;
-        result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
-        isolate->context_slot_cache()->Update(this, name, mode_value, result);
+    ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
+    int result = context_slot_cache->Lookup(this, name, mode, init_flag);
+    if (result != ContextSlotCache::kNotFound) {
+      ASSERT(result < ContextLength());
+      return result;
+    }
+
+    int start = ContextLocalNameEntriesIndex();
+    int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
+    for (int i = start; i < end; ++i) {
+      if (name == get(i)) {
+        int var = i - start;
+        *mode = ContextLocalMode(var);
+        *init_flag = ContextLocalInitFlag(var);
+        result = Context::MIN_CONTEXT_SLOTS + var;
+        context_slot_cache->Update(this, name, *mode, *init_flag, result);
+        ASSERT(result < ContextLength());
         return result;
       }
-      p += 2;
     }
+    context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
   }
-  isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
   return -1;
 }
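
Two details worth noting above: cache hits are validated against ContextLength(), and misses are cached too (with INTERNAL mode and index -1) so repeated failed lookups stay cheap. A standalone sketch of that lookup-with-memoization shape, using std::map in place of the fixed-size ContextSlotCache and assuming MIN_CONTEXT_SLOTS is 4:

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    static const int kMinContextSlots = 4;  // assumed MIN_CONTEXT_SLOTS

    int ContextSlotIndex(const std::vector<std::string>& context_local_names,
                         const std::string& name,
                         std::map<std::string, int>* cache) {
      std::map<std::string, int>::iterator it = cache->find(name);
      if (it != cache->end()) return it->second;  // positive or negative hit
      int result = -1;
      for (int i = 0; i < static_cast<int>(context_local_names.size()); ++i) {
        if (context_local_names[i] == name) {
          result = kMinContextSlots + i;  // skip the fixed context slots
          break;
        }
      }
      (*cache)[name] = result;  // misses are cached too (negative caching)
      return result;
    }

    int main() {
      std::vector<std::string> names;
      names.push_back("x");
      names.push_back("y");
      std::map<std::string, int> cache;
      assert(ContextSlotIndex(names, "y", &cache) == kMinContextSlots + 1);
      assert(ContextSlotIndex(names, "z", &cache) == -1);  // now a cached miss
      return 0;
    }
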
 
 
-int SerializedScopeInfo::ParameterIndex(String* name) {
+int ScopeInfo::ParameterIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
     // We must read parameters from the end since for
@@ -495,41 +336,58 @@
     // last declaration of that parameter is used
     // inside a function (and thus we need to look
     // at the last index). Was bug# 1110337.
-    //
-    // Eventually, we should only register such parameters
-    // once, with corresponding index. This requires a new
-    // implementation of the ScopeInfo code. See also other
-    // comments in this file regarding this.
-    Object** p = ParameterEntriesAddr();
-    int number_of_parameter_slots;
-    Object** p0 = ReadInt(p, &number_of_parameter_slots);
-    p = p0 + number_of_parameter_slots;
-    while (p > p0) {
-      p--;
-      if (*p == name) return static_cast<int>(p - p0);
+    int start = ParameterEntriesIndex();
+    int end = ParameterEntriesIndex() + ParameterCount();
+    for (int i = end - 1; i >= start; --i) {
+      if (name == get(i)) {
+        return i - start;
+      }
     }
   }
   return -1;
 }
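
The backwards scan exists because non-strict JavaScript permits duplicate parameter names, and the last declaration wins. A standalone illustration:

    #include <cassert>
    #include <string>
    #include <vector>

    // Returns the index of the *last* parameter with the given name, or -1.
    int ParameterIndex(const std::vector<std::string>& params,
                       const std::string& name) {
      for (int i = static_cast<int>(params.size()) - 1; i >= 0; --i) {
        if (params[i] == name) return i;
      }
      return -1;
    }

    int main() {
      // function f(x, y, x) { return x; }  // non-strict: 'x' is parameter 2
      std::vector<std::string> params;
      params.push_back("x");
      params.push_back("y");
      params.push_back("x");
      assert(ParameterIndex(params, "x") == 2);
      return 0;
    }
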
 
 
-int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
+int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
   ASSERT(name->IsSymbol());
+  ASSERT(mode != NULL);
   if (length() > 0) {
-    Object** p = data_start();
-    if (*p == name) {
-      p = ContextEntriesAddr();
-      int number_of_context_slots;
-      ReadInt(p, &number_of_context_slots);
-      ASSERT(number_of_context_slots != 0);
-      // The function context slot is the last entry.
-      return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
+    if (FunctionVariableField::decode(Flags()) == CONTEXT &&
+        FunctionName() == name) {
+      *mode = FunctionVariableMode::decode(Flags());
+      return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
     }
   }
   return -1;
 }
 
 
+int ScopeInfo::ParameterEntriesIndex() {
+  ASSERT(length() > 0);
+  return kVariablePartIndex;
+}
+
+
+int ScopeInfo::StackLocalEntriesIndex() {
+  return ParameterEntriesIndex() + ParameterCount();
+}
+
+
+int ScopeInfo::ContextLocalNameEntriesIndex() {
+  return StackLocalEntriesIndex() + StackLocalCount();
+}
+
+
+int ScopeInfo::ContextLocalInfoEntriesIndex() {
+  return ContextLocalNameEntriesIndex() + ContextLocalCount();
+}
+
+
+int ScopeInfo::FunctionNameEntryIndex() {
+  return ContextLocalInfoEntriesIndex() + ContextLocalCount();
+}
+
+
 int ContextSlotCache::Hash(Object* data, String* name) {
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
@@ -540,12 +398,14 @@
 
 int ContextSlotCache::Lookup(Object* data,
                              String* name,
-                             Variable::Mode* mode) {
+                             VariableMode* mode,
+                             InitializationFlag* init_flag) {
   int index = Hash(data, name);
   Key& key = keys_[index];
   if ((key.data == data) && key.name->Equals(name)) {
     Value result(values_[index]);
     if (mode != NULL) *mode = result.mode();
+    if (init_flag != NULL) *init_flag = result.initialization_flag();
     return result.index() + kNotFound;
   }
   return kNotFound;
@@ -554,7 +414,8 @@
 
 void ContextSlotCache::Update(Object* data,
                               String* name,
-                              Variable::Mode mode,
+                              VariableMode mode,
+                              InitializationFlag init_flag,
                               int slot_index) {
   String* symbol;
   ASSERT(slot_index > kNotFound);
@@ -564,9 +425,9 @@
     key.data = data;
     key.name = symbol;
     // Please note value only takes a uint as index.
-    values_[index] = Value(mode, slot_index - kNotFound).raw();
+    values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
 #ifdef DEBUG
-    ValidateEntry(data, name, mode, slot_index);
+    ValidateEntry(data, name, mode, init_flag, slot_index);
 #endif
   }
 }
@@ -581,7 +442,8 @@
 
 void ContextSlotCache::ValidateEntry(Object* data,
                                      String* name,
-                                     Variable::Mode mode,
+                                     VariableMode mode,
+                                     InitializationFlag init_flag,
                                      int slot_index) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
@@ -591,51 +453,56 @@
     ASSERT(key.name->Equals(name));
     Value result(values_[index]);
     ASSERT(result.mode() == mode);
+    ASSERT(result.initialization_flag() == init_flag);
     ASSERT(result.index() + kNotFound == slot_index);
   }
 }
 
 
-template <class Allocator>
 static void PrintList(const char* list_name,
                       int nof_internal_slots,
-                      List<Handle<String>, Allocator>& list) {
-  if (list.length() > 0) {
+                      int start,
+                      int end,
+                      ScopeInfo* scope_info) {
+  if (start < end) {
     PrintF("\n  // %s\n", list_name);
     if (nof_internal_slots > 0) {
       PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
     }
-    for (int i = 0; i < list.length(); i++) {
-      PrintF("  %2d ", i + nof_internal_slots);
-      list[i]->ShortPrint();
+    for (int i = nof_internal_slots; start < end; ++i, ++start) {
+      PrintF("  %2d ", i);
+      String::cast(scope_info->get(start))->ShortPrint();
       PrintF("\n");
     }
   }
 }
 
 
-template<class Allocator>
-void ScopeInfo<Allocator>::Print() {
+void ScopeInfo::Print() {
   PrintF("ScopeInfo ");
-  if (function_name_->length() > 0)
-    function_name_->ShortPrint();
-  else
+  if (HasFunctionName()) {
+    FunctionName()->ShortPrint();
+  } else {
     PrintF("/* no function name */");
+  }
   PrintF("{");
 
-  PrintList<Allocator>("parameters", 0, parameters_);
-  PrintList<Allocator>("stack slots", 0, stack_slots_);
-  PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
-                       context_slots_);
+  PrintList("parameters", 0,
+            ParameterEntriesIndex(),
+            ParameterEntriesIndex() + ParameterCount(),
+            this);
+  PrintList("stack slots", 0,
+            StackLocalEntriesIndex(),
+            StackLocalEntriesIndex() + StackLocalCount(),
+            this);
+  PrintList("context slots",
+            Context::MIN_CONTEXT_SLOTS,
+            ContextLocalNameEntriesIndex(),
+            ContextLocalNameEntriesIndex() + ContextLocalCount(),
+            this);
 
   PrintF("}\n");
 }
 #endif  // DEBUG
 
-
-// Make sure the classes get instantiated by the template system.
-template class ScopeInfo<FreeStoreAllocationPolicy>;
-template class ScopeInfo<PreallocatedStorage>;
-template class ScopeInfo<ZoneListAllocationPolicy>;
-
 } }  // namespace v8::internal
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 40c5c8a..93734f5 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -35,135 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Scope information represents information about a functions's
-// scopes (currently only one, because we don't do any inlining)
-// and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in FixedArray objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-//
-// Historical note: In other VMs built by this team, ScopeInfo was
-// usually called DebugInfo since the information was used (among
-// other things) for on-demand debugging (Self, Smalltalk). However,
-// DebugInfo seems misleading, since this information is primarily used
-// in debugging-unrelated contexts.
-
-// Forward defined as
-// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-template<class Allocator>
-class ScopeInfo BASE_EMBEDDED {
- public:
-  // Create a ScopeInfo instance from a scope.
-  explicit ScopeInfo(Scope* scope);
-
-  // Create a ScopeInfo instance from SerializedScopeInfo.
-  explicit ScopeInfo(SerializedScopeInfo* data);
-
-  // Creates a SerializedScopeInfo holding the serialized scope info.
-  Handle<SerializedScopeInfo> Serialize();
-
-  // --------------------------------------------------------------------------
-  // Lookup
-
-  Handle<String> function_name() const { return function_name_; }
-
-  Handle<String> parameter_name(int i) const { return parameters_[i]; }
-  int number_of_parameters() const { return parameters_.length(); }
-
-  Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
-  int number_of_stack_slots() const { return stack_slots_.length(); }
-
-  Handle<String> context_slot_name(int i) const {
-    return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
-  }
-  int number_of_context_slots() const {
-    int l = context_slots_.length();
-    return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
-  }
-
-  Handle<String> LocalName(int i) const;
-  int NumberOfLocals() const;
-
-  // --------------------------------------------------------------------------
-  // Debugging support
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Handle<String> function_name_;
-  bool calls_eval_;
-  bool is_strict_mode_;
-  List<Handle<String>, Allocator > parameters_;
-  List<Handle<String>, Allocator > stack_slots_;
-  List<Handle<String>, Allocator > context_slots_;
-  List<Variable::Mode, Allocator > context_modes_;
-};
-
-
-// This object provides quick access to scope info details for runtime
-// routines w/o the need to explicitly create a ScopeInfo object.
-class SerializedScopeInfo : public FixedArray {
- public :
-
-  static SerializedScopeInfo* cast(Object* object) {
-    ASSERT(object->IsSerializedScopeInfo());
-    return reinterpret_cast<SerializedScopeInfo*>(object);
-  }
-
-  // Does this scope call eval?
-  bool CallsEval();
-
-  // Is this scope a strict mode scope?
-  bool IsStrictMode();
-
-  // Return the number of stack slots for code.
-  int NumberOfStackSlots();
-
-  // Return the number of context slots for code.
-  int NumberOfContextSlots();
-
-  // Return if this has context slots besides MIN_CONTEXT_SLOTS;
-  bool HasHeapAllocatedLocals();
-
-  // Lookup support for serialized scope info. Returns the
-  // the stack slot index for a given slot name if the slot is
-  // present; otherwise returns a value < 0. The name must be a symbol
-  // (canonicalized).
-  int StackSlotIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // context slot index for a given slot name if the slot is present; otherwise
-  // returns a value < 0. The name must be a symbol (canonicalized).
-  // If the slot is present and mode != NULL, sets *mode to the corresponding
-  // mode for that variable.
-  int ContextSlotIndex(String* name, Variable::Mode* mode);
-
-  // Lookup support for serialized scope info. Returns the
-  // parameter index for a given parameter name if the parameter is present;
-  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
-  int ParameterIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // function context slot index if the function name is present (named
-  // function expressions, only), otherwise returns a value < 0. The name
-  // must be a symbol (canonicalized).
-  int FunctionContextSlotIndex(String* name);
-
-  static Handle<SerializedScopeInfo> Create(Scope* scope);
-
-  // Serializes empty scope info.
-  static SerializedScopeInfo* Empty();
-
- private:
-  inline Object** ContextEntriesAddr();
-
-  inline Object** ParameterEntriesAddr();
-
-  inline Object** StackSlotEntriesAddr();
-};
-
-
 // Cache for mapping (data, property name) into context slot index.
 // The cache contains both positive and negative results.
 // Slot index equals -1 means the property is absent.
@@ -174,12 +45,14 @@
   // If absent, kNotFound is returned.
   int Lookup(Object* data,
              String* name,
-             Variable::Mode* mode);
+             VariableMode* mode,
+             InitializationFlag* init_flag);
 
   // Update an element in the cache.
   void Update(Object* data,
               String* name,
-              Variable::Mode mode,
+              VariableMode mode,
+              InitializationFlag init_flag,
               int slot_index);
 
   // Clear the cache.
@@ -201,7 +74,8 @@
 #ifdef DEBUG
   void ValidateEntry(Object* data,
                      String* name,
-                     Variable::Mode mode,
+                     VariableMode mode,
+                     InitializationFlag init_flag,
                      int slot_index);
 #endif
 
@@ -212,11 +86,17 @@
   };
 
   struct Value {
-    Value(Variable::Mode mode, int index) {
+    Value(VariableMode mode,
+          InitializationFlag init_flag,
+          int index) {
       ASSERT(ModeField::is_valid(mode));
+      ASSERT(InitField::is_valid(init_flag));
       ASSERT(IndexField::is_valid(index));
-      value_ = ModeField::encode(mode) | IndexField::encode(index);
+      value_ = ModeField::encode(mode) |
+          IndexField::encode(index) |
+          InitField::encode(init_flag);
       ASSERT(mode == this->mode());
+      ASSERT(init_flag == this->initialization_flag());
       ASSERT(index == this->index());
     }
 
@@ -224,14 +104,20 @@
 
     uint32_t raw() { return value_; }
 
-    Variable::Mode mode() { return ModeField::decode(value_); }
+    VariableMode mode() { return ModeField::decode(value_); }
+
+    InitializationFlag initialization_flag() {
+      return InitField::decode(value_);
+    }
 
     int index() { return IndexField::decode(value_); }
 
     // Bit fields in value_ (type, shift, size). Must be public so the
     // constants can be embedded in generated code.
-    class ModeField:  public BitField<Variable::Mode, 0, 3> {};
-    class IndexField: public BitField<int,            3, 32-3> {};
+    class ModeField:  public BitField<VariableMode,       0, 3> {};
+    class InitField:  public BitField<InitializationFlag, 3, 1> {};
+    class IndexField: public BitField<int,                4, 32-4> {};
+
    private:
     uint32_t value_;
   };
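
The widened Value packing (3 mode bits, 1 init bit, 28 index bits) relies on V8's BitField template from src/utils.h. A self-contained sketch of the same encode/decode arithmetic with a minimal stand-in template:

    #include <cassert>
    #include <stdint.h>

    // Minimal stand-in for the BitField template in src/utils.h (illustrative).
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 3>  ModeField;   // VariableMode fits in 3 bits
    typedef BitField<int, 3, 1>  InitField;   // InitializationFlag is 1 bit
    typedef BitField<int, 4, 28> IndexField;  // slot index gets the other 28

    int main() {
      uint32_t v = ModeField::encode(5) | InitField::encode(1) |
                   IndexField::encode(1234);
      assert(ModeField::decode(v) == 5);
      assert(InitField::decode(v) == 1);
      assert(IndexField::decode(v) == 1234);
      return 0;
    }
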
diff --git a/src/scopes.cc b/src/scopes.cc
index d5a7a9f..ad8b6a5 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -55,7 +55,7 @@
 };
 
 
-static ZoneAllocator LocalsMapAllocator;
+static ZoneAllocator* LocalsMapAllocator = ::new ZoneAllocator();
 
 
 // ----------------------------------------------------------------------------
@@ -76,23 +76,27 @@
 }
 
 
-// Dummy constructor
-VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
-
-VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
+VariableMap::VariableMap() : HashMap(Match, LocalsMapAllocator, 8) {}
 VariableMap::~VariableMap() {}
 
 
-Variable* VariableMap::Declare(Scope* scope,
-                               Handle<String> name,
-                               Variable::Mode mode,
-                               bool is_valid_lhs,
-                               Variable::Kind kind) {
+Variable* VariableMap::Declare(
+    Scope* scope,
+    Handle<String> name,
+    VariableMode mode,
+    bool is_valid_lhs,
+    Variable::Kind kind,
+    InitializationFlag initialization_flag) {
   HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     ASSERT(p->key == name.location());
-    p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
+    p->value = new Variable(scope,
+                            name,
+                            mode,
+                            is_valid_lhs,
+                            kind,
+                            initialization_flag);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
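
HashMap::Lookup with insert=true is the find-or-insert idiom: a freshly inserted entry comes back with a NULL value, which is the cue to create the Variable exactly once. The same shape with std::map standing in for V8's HashMap (illustrative only):

    #include <cstddef>
    #include <map>
    #include <string>

    struct Variable { std::string name; /* mode, kind, init flag, ... */ };

    Variable* Declare(std::map<std::string, Variable*>* variables,
                      const std::string& name) {
      Variable*& slot = (*variables)[name];  // inserts NULL for a new key
      if (slot == NULL) {                    // first declaration only
        slot = new Variable();
        slot->name = name;
      }
      return slot;  // redeclarations reuse the existing Variable
    }
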
@@ -112,22 +116,7 @@
 // ----------------------------------------------------------------------------
 // Implementation of Scope
 
-
-// Dummy constructor
-Scope::Scope(Type type)
-    : isolate_(Isolate::Current()),
-      inner_scopes_(0),
-      variables_(false),
-      temps_(0),
-      params_(0),
-      unresolved_(0),
-      decls_(0),
-      already_resolved_(false) {
-  SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
-}
-
-
-Scope::Scope(Scope* outer_scope, Type type)
+Scope::Scope(Scope* outer_scope, ScopeType type)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -136,18 +125,18 @@
       unresolved_(16),
       decls_(4),
       already_resolved_(false) {
-  SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
+  SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
-  ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
+  ASSERT_EQ(type == GLOBAL_SCOPE, outer_scope == NULL);
   ASSERT(!HasIllegalRedeclaration());
 }
 
 
 Scope::Scope(Scope* inner_scope,
-             Type type,
-             Handle<SerializedScopeInfo> scope_info)
+             ScopeType type,
+             Handle<ScopeInfo> scope_info)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -156,11 +145,13 @@
       unresolved_(16),
       decls_(4),
       already_resolved_(true) {
-  ASSERT(!scope_info.is_null());
   SetDefaults(type, NULL, scope_info);
-  if (scope_info->HasHeapAllocatedLocals()) {
-    num_heap_slots_ = scope_info_->NumberOfContextSlots();
+  if (!scope_info.is_null()) {
+    num_heap_slots_ = scope_info_->ContextLength();
   }
+  // Ensure at least MIN_CONTEXT_SLOTS to indicate a materialized context.
+  num_heap_slots_ = Max(num_heap_slots_,
+                        static_cast<int>(Context::MIN_CONTEXT_SLOTS));
   AddInnerScope(inner_scope);
 }
 
@@ -174,21 +165,23 @@
       unresolved_(0),
       decls_(0),
       already_resolved_(true) {
-  SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
+  SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
   AddInnerScope(inner_scope);
   ++num_var_or_const_;
+  num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
   Variable* variable = variables_.Declare(this,
                                           catch_variable_name,
-                                          Variable::VAR,
+                                          VAR,
                                           true,  // Valid left-hand side.
-                                          Variable::NORMAL);
+                                          Variable::NORMAL,
+                                          kCreatedInitialized);
   AllocateHeapSlot(variable);
 }
 
 
-void Scope::SetDefaults(Type type,
+void Scope::SetDefaults(ScopeType type,
                         Scope* outer_scope,
-                        Handle<SerializedScopeInfo> scope_info) {
+                        Handle<ScopeInfo> scope_info) {
   outer_scope_ = outer_scope;
   type_ = type;
   scope_name_ = isolate_->factory()->empty_symbol();
@@ -201,53 +194,57 @@
   scope_contains_with_ = false;
   scope_calls_eval_ = false;
   // Inherit the strict mode from the parent scope.
-  strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
-  outer_scope_calls_eval_ = false;
+  language_mode_ = (outer_scope != NULL)
+      ? outer_scope->language_mode_ : CLASSIC_MODE;
   outer_scope_calls_non_strict_eval_ = false;
   inner_scope_calls_eval_ = false;
-  outer_scope_is_eval_scope_ = false;
   force_eager_compilation_ = false;
   num_var_or_const_ = 0;
   num_stack_slots_ = 0;
   num_heap_slots_ = 0;
   scope_info_ = scope_info;
+  start_position_ = RelocInfo::kNoPosition;
+  end_position_ = RelocInfo::kNoPosition;
+  if (!scope_info.is_null()) {
+    scope_calls_eval_ = scope_info->CallsEval();
+    language_mode_ = scope_info->language_mode();
+  }
 }
 
 
-Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
-                                    Scope* global_scope) {
+Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope) {
   // Reconstruct the outer scope chain from a closure's context chain.
-  ASSERT(!info->closure().is_null());
-  Context* context = info->closure()->context();
   Scope* current_scope = NULL;
   Scope* innermost_scope = NULL;
   bool contains_with = false;
   while (!context->IsGlobalContext()) {
     if (context->IsWithContext()) {
+      Scope* with_scope = new Scope(current_scope,
+                                    WITH_SCOPE,
+                                    Handle<ScopeInfo>::null());
+      current_scope = with_scope;
       // All the inner scopes are inside a with.
       contains_with = true;
       for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
         s->scope_inside_with_ = true;
       }
+    } else if (context->IsFunctionContext()) {
+      ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+      current_scope = new Scope(current_scope,
+                                FUNCTION_SCOPE,
+                                Handle<ScopeInfo>(scope_info));
+    } else if (context->IsBlockContext()) {
+      ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+      current_scope = new Scope(current_scope,
+                                BLOCK_SCOPE,
+                                Handle<ScopeInfo>(scope_info));
     } else {
-      if (context->IsFunctionContext()) {
-        SerializedScopeInfo* scope_info =
-            context->closure()->shared()->scope_info();
-        current_scope = new Scope(current_scope, FUNCTION_SCOPE,
-            Handle<SerializedScopeInfo>(scope_info));
-      } else if (context->IsBlockContext()) {
-        SerializedScopeInfo* scope_info =
-            SerializedScopeInfo::cast(context->extension());
-        current_scope = new Scope(current_scope, BLOCK_SCOPE,
-            Handle<SerializedScopeInfo>(scope_info));
-      } else {
-        ASSERT(context->IsCatchContext());
-        String* name = String::cast(context->extension());
-        current_scope = new Scope(current_scope, Handle<String>(name));
-      }
-      if (contains_with) current_scope->RecordWithStatement();
-      if (innermost_scope == NULL) innermost_scope = current_scope;
+      ASSERT(context->IsCatchContext());
+      String* name = String::cast(context->extension());
+      current_scope = new Scope(current_scope, Handle<String>(name));
     }
+    if (contains_with) current_scope->RecordWithStatement();
+    if (innermost_scope == NULL) innermost_scope = current_scope;
 
     // Forget about a with when we move to a context for a different function.
     if (context->previous()->closure() != context->closure()) {
@@ -257,39 +254,48 @@
   }
 
   global_scope->AddInnerScope(current_scope);
+  global_scope->PropagateScopeInfo(false);
   return (innermost_scope == NULL) ? global_scope : innermost_scope;
 }
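
The walk goes inside-out over the contexts while the scope chain is assembled outside-in: each new Scope takes the chain built so far as its inner scope. A simplified standalone sketch of that traversal (illustrative types; the real code also attaches ScopeInfo and tracks 'with' nesting):

    #include <cstddef>

    enum ContextKind { GLOBAL_CTX, FUNCTION_CTX, WITH_CTX, BLOCK_CTX, CATCH_CTX };

    struct Context { ContextKind kind; Context* previous; };

    struct Scope {
      ContextKind kind;
      Scope* inner;  // the chain built before this scope was created
      Scope(ContextKind k, Scope* inner_scope) : kind(k), inner(inner_scope) {}
    };

    // Returns the innermost reconstructed scope; 'current' ends up outermost.
    Scope* DeserializeScopeChain(Context* context) {
      Scope* current = NULL;
      Scope* innermost = NULL;
      while (context->kind != GLOBAL_CTX) {
        current = new Scope(context->kind, current);  // wrap chain in new outer
        if (innermost == NULL) innermost = current;
        context = context->previous;
      }
      // The caller hangs 'current' off the global scope and resumes parsing
      // at 'innermost'.
      return innermost;
    }
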
 
 
 bool Scope::Analyze(CompilationInfo* info) {
   ASSERT(info->function() != NULL);
-  Scope* top = info->function()->scope();
+  Scope* scope = info->function()->scope();
+  Scope* top = scope;
 
-  while (top->outer_scope() != NULL) top = top->outer_scope();
-  top->AllocateVariables(info->calling_context());
+  // Traverse the scope tree up to the first unresolved scope or the global
+  // scope and start scope resolution and variable allocation from that scope.
+  while (!top->is_global_scope() &&
+         !top->outer_scope()->already_resolved()) {
+    top = top->outer_scope();
+  }
+
+  // Allocate the variables.
+  top->AllocateVariables(info->global_scope());
 
 #ifdef DEBUG
   if (info->isolate()->bootstrapper()->IsActive()
           ? FLAG_print_builtin_scopes
           : FLAG_print_scopes) {
-    info->function()->scope()->Print();
+    scope->Print();
   }
 #endif
 
-  info->SetScope(info->function()->scope());
+  info->SetScope(scope);
   return true;  // Cannot fail.
 }
 
 
-void Scope::Initialize(bool inside_with) {
+void Scope::Initialize() {
   ASSERT(!already_resolved());
 
   // Add this scope as a new inner scope of the outer scope.
   if (outer_scope_ != NULL) {
     outer_scope_->inner_scopes_.Add(this);
-    scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
+    scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
   } else {
-    scope_inside_with_ = inside_with;
+    scope_inside_with_ = is_with_scope();
   }
 
   // Declare convenience variables.
@@ -300,21 +306,19 @@
   // instead load them directly from the stack. Currently, the only
   // such parameter is 'this' which is passed on the stack when
   // invoking scripts
-  if (is_catch_scope() || is_block_scope()) {
-    ASSERT(outer_scope() != NULL);
-    receiver_ = outer_scope()->receiver();
-  } else {
-    ASSERT(is_function_scope() ||
-           is_global_scope() ||
-           is_eval_scope());
+  if (is_declaration_scope()) {
     Variable* var =
         variables_.Declare(this,
                            isolate_->factory()->this_symbol(),
-                           Variable::VAR,
+                           VAR,
                            false,
-                           Variable::THIS);
+                           Variable::THIS,
+                           kCreatedInitialized);
     var->AllocateTo(Variable::PARAMETER, -1);
     receiver_ = var;
+  } else {
+    ASSERT(outer_scope() != NULL);
+    receiver_ = outer_scope()->receiver();
   }
 
   if (is_function_scope()) {
@@ -323,9 +327,10 @@
     // allocated during variable allocation.
     variables_.Declare(this,
                        isolate_->factory()->arguments_symbol(),
-                       Variable::VAR,
+                       VAR,
                        true,
-                       Variable::ARGUMENTS);
+                       Variable::ARGUMENTS,
+                       kCreatedInitialized);
   }
 }
 
@@ -365,34 +370,50 @@
     return result;
   }
   // If we have a serialized scope info, we might find the variable there.
-  //
-  // We should never lookup 'arguments' in this scope as it is implicitly
-  // present in every scope.
-  ASSERT(*name != *isolate_->factory()->arguments_symbol());
   // There should be no local slot with the given name.
   ASSERT(scope_info_->StackSlotIndex(*name) < 0);
 
   // Check context slot lookup.
-  Variable::Mode mode;
-  int index = scope_info_->ContextSlotIndex(*name, &mode);
+  VariableMode mode;
+  InitializationFlag init_flag;
+  int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
   if (index < 0) {
     // Check parameters.
-    mode = Variable::VAR;
+    mode = VAR;
+    init_flag = kCreatedInitialized;
     index = scope_info_->ParameterIndex(*name);
-    if (index < 0) {
-      // Check the function name.
-      index = scope_info_->FunctionContextSlotIndex(*name);
-      if (index < 0) return NULL;
-    }
+    if (index < 0) return NULL;
   }
 
   Variable* var =
-      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+      variables_.Declare(this,
+                         name,
+                         mode,
+                         true,
+                         Variable::NORMAL,
+                         init_flag);
   var->AllocateTo(Variable::CONTEXT, index);
   return var;
 }
 
 
+Variable* Scope::LookupFunctionVar(Handle<String> name) {
+  if (function_ != NULL && function_->name().is_identical_to(name)) {
+    return function_->var();
+  } else if (!scope_info_.is_null()) {
+    // If we are backed by a scope info, try to lookup the variable there.
+    VariableMode mode;
+    int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
+    if (index < 0) return NULL;
+    Variable* var = DeclareFunctionVar(name, mode);
+    var->AllocateTo(Variable::CONTEXT, index);
+    return var;
+  } else {
+    return NULL;
+  }
+}
+
+
 Variable* Scope::Lookup(Handle<String> name) {
   for (Scope* scope = this;
        scope != NULL;
@@ -404,54 +425,59 @@
 }
 
 
-Variable* Scope::DeclareFunctionVar(Handle<String> name) {
+Variable* Scope::DeclareFunctionVar(Handle<String> name, VariableMode mode) {
   ASSERT(is_function_scope() && function_ == NULL);
-  Variable* function_var =
-      new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
+  Variable* function_var = new Variable(
+      this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
   function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
   return function_var;
 }
 
 
-void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) {
+void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
   ASSERT(!already_resolved());
   ASSERT(is_function_scope());
-  Variable* var =
-      variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  Variable* var = variables_.Declare(
+      this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
   params_.Add(var);
 }
 
 
-Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::DeclareLocal(Handle<String> name,
+                              VariableMode mode,
+                              InitializationFlag init_flag) {
   ASSERT(!already_resolved());
   // This function handles VAR and CONST modes.  DYNAMIC variables are
   // introduced during variable allocation, INTERNAL variables are allocated
   // explicitly, and TEMPORARY variables are allocated via NewTemporary().
-  ASSERT(mode == Variable::VAR ||
-         mode == Variable::CONST ||
-         mode == Variable::LET);
+  ASSERT(mode == VAR ||
+         mode == CONST ||
+         mode == CONST_HARMONY ||
+         mode == LET);
   ++num_var_or_const_;
-  return variables_.Declare(this, name, mode, true, Variable::NORMAL);
+  return
+      variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag);
 }
 
 
 Variable* Scope::DeclareGlobal(Handle<String> name) {
   ASSERT(is_global_scope());
-  return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL,
+  return variables_.Declare(this,
+                            name,
+                            DYNAMIC_GLOBAL,
                             true,
-                            Variable::NORMAL);
+                            Variable::NORMAL,
+                            kCreatedInitialized);
 }
 
 
-VariableProxy* Scope::NewUnresolved(Handle<String> name,
-                                    bool inside_with,
-                                    int position) {
+VariableProxy* Scope::NewUnresolved(Handle<String> name, int position) {
   // Note that we must not share the unresolved variables with
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
   ASSERT(!already_resolved());
   VariableProxy* proxy = new(isolate_->zone()) VariableProxy(
-      isolate_, name, false, inside_with, position);
+      isolate_, name, false, position);
   unresolved_.Add(proxy);
   return proxy;
 }
@@ -473,9 +499,10 @@
   ASSERT(!already_resolved());
   Variable* var = new Variable(this,
                                name,
-                               Variable::TEMPORARY,
+                               TEMPORARY,
                                true,
-                               Variable::NORMAL);
+                               Variable::NORMAL,
+                               kCreatedInitialized);
   temps_.Add(var);
   return var;
 }
@@ -505,81 +532,68 @@
   int length = decls_.length();
   for (int i = 0; i < length; i++) {
     Declaration* decl = decls_[i];
-    if (decl->mode() != Variable::VAR) continue;
+    if (decl->mode() != VAR) continue;
     Handle<String> name = decl->proxy()->name();
-    bool cond = true;
-    for (Scope* scope = decl->scope(); cond ; scope = scope->outer_scope_) {
+
+    // Iterate through all scopes up to and including the declaration scope.
+    Scope* previous = NULL;
+    Scope* current = decl->scope();
+    do {
       // There is a conflict if there exists a non-VAR binding.
-      Variable* other_var = scope->variables_.Lookup(name);
-      if (other_var != NULL && other_var->mode() != Variable::VAR) {
+      Variable* other_var = current->variables_.Lookup(name);
+      if (other_var != NULL && other_var->mode() != VAR) {
         return decl;
       }
-
-      // Include declaration scope in the iteration but stop after.
-      if (!scope->is_block_scope() && !scope->is_catch_scope()) cond = false;
-    }
+      previous = current;
+      current = current->outer_scope_;
+    } while (!previous->is_declaration_scope());
   }
   return NULL;
 }
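
Aside: a minimal standalone sketch of the conflict check above, using
simplified stand-in types rather than V8's classes. The point of the do/while
is that the declaration scope itself must still be visited before the walk
stops:

#include <map>
#include <string>

// Illustrative sketch only: simplified stand-ins for v8::internal types.
struct MiniScope {
  MiniScope* outer;                     // NULL for the global scope
  bool is_declaration_scope;            // function, eval or global scope
  std::map<std::string, int> bindings;  // name -> mode (0 stands for VAR)
};

// Returns true if 'name' has a non-VAR binding in any scope from 'start'
// up to and including the innermost enclosing declaration scope.
bool HasConflictingBinding(MiniScope* start, const std::string& name) {
  MiniScope* previous = NULL;
  MiniScope* current = start;
  do {
    std::map<std::string, int>::const_iterator it =
        current->bindings.find(name);
    if (it != current->bindings.end() && it->second != 0) return true;
    previous = current;
    current = current->outer;
  } while (!previous->is_declaration_scope);
  return false;
}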
 
 
-template<class Allocator>
-void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
-  // Collect variables in this scope.
-  // Note that the function_ variable - if present - is not
-  // collected here but handled separately in ScopeInfo
-  // which is the current user of this function).
+void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                         ZoneList<Variable*>* context_locals) {
+  ASSERT(stack_locals != NULL);
+  ASSERT(context_locals != NULL);
+
+  // Collect temporaries which are always allocated on the stack.
   for (int i = 0; i < temps_.length(); i++) {
     Variable* var = temps_[i];
     if (var->is_used()) {
-      locals->Add(var);
+      ASSERT(var->IsStackLocal());
+      stack_locals->Add(var);
     }
   }
+
+  // Collect declared local variables.
   for (VariableMap::Entry* p = variables_.Start();
        p != NULL;
        p = variables_.Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
     if (var->is_used()) {
-      locals->Add(var);
+      if (var->IsStackLocal()) {
+        stack_locals->Add(var);
+      } else if (var->IsContextSlot()) {
+        context_locals->Add(var);
+      }
     }
   }
 }
 
 
-// Make sure the method gets instantiated by the template system.
-template void Scope::CollectUsedVariables(
-    List<Variable*, FreeStoreAllocationPolicy>* locals);
-template void Scope::CollectUsedVariables(
-    List<Variable*, PreallocatedStorage>* locals);
-template void Scope::CollectUsedVariables(
-    List<Variable*, ZoneListAllocationPolicy>* locals);
-
-
-void Scope::AllocateVariables(Handle<Context> context) {
-  ASSERT(outer_scope_ == NULL);  // eval or global scopes only
-
+void Scope::AllocateVariables(Scope* global_scope) {
   // 1) Propagate scope information.
-  // If we are in an eval scope, we may have other outer scopes about
-  // which we don't know anything at this point. Thus we must be conservative
-  // and assume they may invoke eval themselves. Eventually we could capture
-  // this information in the ScopeInfo and then use it here (by traversing
-  // the call chain stack, at compile time).
-
-  bool eval_scope = is_eval_scope();
-  bool outer_scope_calls_eval = false;
   bool outer_scope_calls_non_strict_eval = false;
-  if (!is_global_scope()) {
-    context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
-                                  &outer_scope_calls_non_strict_eval);
+  if (outer_scope_ != NULL) {
+    outer_scope_calls_non_strict_eval =
+        outer_scope_->outer_scope_calls_non_strict_eval() ||
+        outer_scope_->calls_non_strict_eval();
   }
-  PropagateScopeInfo(outer_scope_calls_eval,
-                     outer_scope_calls_non_strict_eval,
-                     eval_scope);
+  PropagateScopeInfo(outer_scope_calls_non_strict_eval);
 
   // 2) Resolve variables.
-  Scope* global_scope = NULL;
-  if (is_global_scope()) global_scope = this;
-  ResolveVariablesRecursively(global_scope, context);
+  ResolveVariablesRecursively(global_scope);
 
   // 3) Allocate variables.
   AllocateVariablesRecursively();
@@ -627,30 +641,48 @@
 
 Scope* Scope::DeclarationScope() {
   Scope* scope = this;
-  while (scope->is_catch_scope() ||
-         scope->is_block_scope()) {
+  while (!scope->is_declaration_scope()) {
     scope = scope->outer_scope();
   }
   return scope;
 }
 
 
-Handle<SerializedScopeInfo> Scope::GetSerializedScopeInfo() {
+Handle<ScopeInfo> Scope::GetScopeInfo() {
   if (scope_info_.is_null()) {
-    scope_info_ = SerializedScopeInfo::Create(this);
+    scope_info_ = ScopeInfo::Create(this);
   }
   return scope_info_;
 }
 
 
+void Scope::GetNestedScopeChain(
+    List<Handle<ScopeInfo> >* chain,
+    int position) {
+  if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
+
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    Scope* scope = inner_scopes_[i];
+    int beg_pos = scope->start_position();
+    int end_pos = scope->end_position();
+    ASSERT(beg_pos >= 0 && end_pos >= 0);
+    if (beg_pos <= position && position < end_pos) {
+      scope->GetNestedScopeChain(chain, position);
+      return;
+    }
+  }
+}
+
+
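
Aside: a minimal standalone sketch of the position-based chain collection in
GetNestedScopeChain above, with a simplified scope type (the eval-scope
special case is omitted). Because scopes nest properly, at most one inner
scope can cover a given source position, so the first hit ends the search:

#include <vector>

// Illustrative sketch only: simplified stand-in for v8::internal::Scope.
struct PosScope {
  int start_pos;
  int end_pos;                   // covers positions [start_pos, end_pos)
  std::vector<PosScope*> inner;  // immediately enclosed scopes
};

// Appends the scopes covering 'position' to 'chain', outermost first.
void CollectChain(PosScope* scope, int position,
                  std::vector<PosScope*>* chain) {
  chain->push_back(scope);
  for (int i = 0; i < static_cast<int>(scope->inner.size()); i++) {
    PosScope* s = scope->inner[i];
    if (s->start_pos <= position && position < s->end_pos) {
      CollectChain(s, position, chain);
      return;  // proper nesting: no sibling scope can also match
    }
  }
}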
 #ifdef DEBUG
-static const char* Header(Scope::Type type) {
+static const char* Header(ScopeType type) {
   switch (type) {
-    case Scope::EVAL_SCOPE: return "eval";
-    case Scope::FUNCTION_SCOPE: return "function";
-    case Scope::GLOBAL_SCOPE: return "global";
-    case Scope::CATCH_SCOPE: return "catch";
-    case Scope::BLOCK_SCOPE: return "block";
+    case EVAL_SCOPE: return "eval";
+    case FUNCTION_SCOPE: return "function";
+    case GLOBAL_SCOPE: return "global";
+    case CATCH_SCOPE: return "catch";
+    case BLOCK_SCOPE: return "block";
+    case WITH_SCOPE: return "with";
   }
   UNREACHABLE();
   return NULL;
@@ -695,9 +727,9 @@
     PrintName(var->name());
     PrintF(";  // ");
     PrintLocation(var);
-    if (var->is_accessed_from_inner_scope()) {
+    if (var->has_forced_context_allocation()) {
       if (!var->IsUnallocated()) PrintF(", ");
-      PrintF("inner scope access");
+      PrintF("forced context allocation");
     }
     PrintF("\n");
   }
@@ -733,7 +765,7 @@
     PrintF(")");
   }
 
-  PrintF(" {\n");
+  PrintF(" { // (%d, %d)\n", start_position(), end_position());
 
   // Function name, if any (named function literals only).
   if (function_ != NULL) {
@@ -746,18 +778,23 @@
   if (HasTrivialOuterContext()) {
     Indent(n1, "// scope has trivial outer context\n");
   }
-  if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
+  switch (language_mode()) {
+    case CLASSIC_MODE:
+      break;
+    case STRICT_MODE:
+      Indent(n1, "// strict mode scope\n");
+      break;
+    case EXTENDED_MODE:
+      Indent(n1, "// extended mode scope\n");
+      break;
+  }
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
-  if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
   if (outer_scope_calls_non_strict_eval_) {
     Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
   }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
-  if (outer_scope_is_eval_scope_) {
-    Indent(n1, "// outer scope is 'eval' scope\n");
-  }
   if (num_stack_slots_ > 0) {
     Indent(n1, "// ");
     PrintF("%d stack slots\n", num_stack_slots_);
   }
   if (num_heap_slots_ > 0) {
     Indent(n1, "// ");
@@ -779,9 +816,9 @@
 
   Indent(n1, "// dynamic vars\n");
   if (dynamics_ != NULL) {
-    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC));
-    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
-    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
+    PrintMap(n1, dynamics_->GetMap(DYNAMIC));
+    PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
+    PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -797,13 +834,20 @@
 #endif  // DEBUG
 
 
-Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
   if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
   VariableMap* map = dynamics_->GetMap(mode);
   Variable* var = map->Lookup(name);
   if (var == NULL) {
     // Declare a new non-local.
-    var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
+    InitializationFlag init_flag = (mode == VAR)
+        ? kCreatedInitialized : kNeedsInitialization;
+    var = map->Declare(NULL,
+                       name,
+                       mode,
+                       true,
+                       Variable::NORMAL,
+                       init_flag);
     // Allocate it by giving it a dynamic lookup.
     var->AllocateTo(Variable::LOOKUP, -1);
   }
@@ -811,80 +855,61 @@
 }
 
 
-// Lookup a variable starting with this scope. The result is either
-// the statically resolved variable belonging to an outer scope, or
-// NULL. It may be NULL because a) we couldn't find a variable, or b)
-// because the variable is just a guess (and may be shadowed by
-// another variable that is introduced dynamically via an 'eval' call
-// or a 'with' statement).
 Variable* Scope::LookupRecursive(Handle<String> name,
-                                 bool from_inner_scope,
-                                 Variable** invalidated_local) {
-  // If we find a variable, but the current scope calls 'eval', the found
-  // variable may not be the correct one (the 'eval' may introduce a
-  // property with the same name). In that case, remember that the variable
-  // found is just a guess.
-  bool guess = scope_calls_eval_;
-
+                                 BindingKind* binding_kind) {
+  ASSERT(binding_kind != NULL);
   // Try to find the variable in this scope.
   Variable* var = LocalLookup(name);
 
+  // We found a variable and we are done. (Even if there is an 'eval' in
+  // this scope which introduces the same variable again, the resulting
+  // variable remains the same.)
   if (var != NULL) {
-    // We found a variable. If this is not an inner lookup, we are done.
-    // (Even if there is an 'eval' in this scope which introduces the
-    // same variable again, the resulting variable remains the same.
-    // Note that enclosing 'with' statements are handled at the call site.)
-    if (!from_inner_scope)
-      return var;
+    *binding_kind = BOUND;
+    return var;
+  }
 
-  } else {
-    // We did not find a variable locally. Check against the function variable,
-    // if any. We can do this for all scopes, since the function variable is
-    // only present - if at all - for function scopes.
-    //
-    // This lookup corresponds to a lookup in the "intermediate" scope sitting
-    // between this scope and the outer scope. (ECMA-262, 3rd., requires that
-    // the name of named function literal is kept in an intermediate scope
-    // in between this scope and the next outer scope.)
-    if (function_ != NULL && function_->name().is_identical_to(name)) {
-      var = function_->var();
-
-    } else if (outer_scope_ != NULL) {
-      var = outer_scope_->LookupRecursive(name, true, invalidated_local);
-      // We may have found a variable in an outer scope. However, if
-      // the current scope is inside a 'with', the actual variable may
-      // be a property introduced via the 'with' statement. Then, the
-      // variable we may have found is just a guess.
-      if (scope_inside_with_)
-        guess = true;
+  // We did not find a variable locally. Check against the function variable,
+  // if any. We can do this for all scopes, since the function variable is
+  // only present - if at all - for function scopes.
+  *binding_kind = UNBOUND;
+  var = LookupFunctionVar(name);
+  if (var != NULL) {
+    *binding_kind = BOUND;
+  } else if (outer_scope_ != NULL) {
+    var = outer_scope_->LookupRecursive(name, binding_kind);
+    if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
+      var->ForceContextAllocation();
     }
-
-    // If we did not find a variable, we are done.
-    if (var == NULL)
-      return NULL;
+  } else {
+    ASSERT(is_global_scope());
   }
 
-  ASSERT(var != NULL);
-
-  // If this is a lookup from an inner scope, mark the variable.
-  if (from_inner_scope) {
-    var->MarkAsAccessedFromInnerScope();
+  if (is_with_scope()) {
+  // The current scope is a with scope, so the variable binding cannot be
+    // statically resolved. However, note that it was necessary to do a lookup
+    // in the outer scope anyway, because if a binding exists in an outer scope,
+    // the associated variable has to be marked as potentially being accessed
+    // from inside of an inner with scope (the property may not be in the 'with'
+    // object).
+    *binding_kind = DYNAMIC_LOOKUP;
+    return NULL;
+  } else if (calls_non_strict_eval()) {
+    // A variable binding may have been found in an outer scope, but the current
+    // scope makes a non-strict 'eval' call, so the found variable may not be
+    // the correct one (the 'eval' may introduce a binding with the same name).
+    // In that case, change the lookup result to reflect this situation.
+    if (*binding_kind == BOUND) {
+      *binding_kind = BOUND_EVAL_SHADOWED;
+    } else if (*binding_kind == UNBOUND) {
+      *binding_kind = UNBOUND_EVAL_SHADOWED;
+    }
   }
-
-  // If the variable we have found is just a guess, invalidate the
-  // result. If the found variable is local, record that fact so we
-  // can generate fast code to get it if it is not shadowed by eval.
-  if (guess) {
-    if (!var->is_global()) *invalidated_local = var;
-    var = NULL;
-  }
-
   return var;
 }
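
Aside: a minimal standalone sketch of the lookup algorithm above, with
simplified stand-in types (the function-name variable and forced context
allocation are omitted). A binding in the scope containing the reference wins
outright; otherwise an answer from further out is downgraded by an
intervening 'with' scope or by a non-strict 'eval' call:

#include <map>
#include <string>

// Illustrative sketch only: simplified stand-ins for v8::internal types.
enum MiniBindingKind {
  MB_BOUND, MB_BOUND_EVAL_SHADOWED, MB_UNBOUND,
  MB_UNBOUND_EVAL_SHADOWED, MB_DYNAMIC_LOOKUP
};

struct LookupScope {
  LookupScope* outer;                 // NULL for the global scope
  bool is_with_scope;
  bool calls_non_strict_eval;
  std::map<std::string, int> locals;  // name -> variable id
};

int* Lookup(LookupScope* scope, const std::string& name,
            MiniBindingKind* kind) {
  std::map<std::string, int>::iterator it = scope->locals.find(name);
  if (it != scope->locals.end()) {
    // A binding in this very scope wins, even over a local 'eval'.
    *kind = MB_BOUND;
    return &it->second;
  }
  *kind = MB_UNBOUND;
  int* var = (scope->outer != NULL) ? Lookup(scope->outer, name, kind) : NULL;
  if (scope->is_with_scope) {
    // The name may really be a property of the 'with' object, so the
    // reference can only be resolved dynamically at runtime.
    *kind = MB_DYNAMIC_LOOKUP;
    return NULL;
  }
  if (scope->calls_non_strict_eval) {
    // A non-strict 'eval' may introduce a shadowing binding, so whatever
    // was found further out is only a guess.
    if (*kind == MB_BOUND) *kind = MB_BOUND_EVAL_SHADOWED;
    else if (*kind == MB_UNBOUND) *kind = MB_UNBOUND_EVAL_SHADOWED;
  }
  return var;
}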
 
 
 void Scope::ResolveVariable(Scope* global_scope,
-                            Handle<Context> context,
                             VariableProxy* proxy) {
   ASSERT(global_scope == NULL || global_scope->is_global_scope());
 
@@ -893,116 +918,73 @@
   if (proxy->var() != NULL) return;
 
   // Otherwise, try to resolve the variable.
-  Variable* invalidated_local = NULL;
-  Variable* var = LookupRecursive(proxy->name(), false, &invalidated_local);
+  BindingKind binding_kind;
+  Variable* var = LookupRecursive(proxy->name(), &binding_kind);
+  switch (binding_kind) {
+    case BOUND:
+      // We found a variable binding.
+      break;
 
-  if (proxy->inside_with()) {
-    // If we are inside a local 'with' statement, all bets are off
-    // and we cannot resolve the proxy to a local variable even if
-    // we found an outer matching variable.
-    // Note that we must do a lookup anyway, because if we find one,
-    // we must mark that variable as potentially accessed from this
-    // inner scope (the property may not be in the 'with' object).
-    var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
-  } else {
-    // We are not inside a local 'with' statement.
-
-    if (var == NULL) {
-      // We did not find the variable. We have a global variable
-      // if we are in the global scope (we know already that we
-      // are outside a 'with' statement) or if there is no way
-      // that the variable might be introduced dynamically (through
-      // a local or outer eval() call, or an outer 'with' statement),
-      // or we don't know about the outer scope (because we are
-      // in an eval scope).
-      if (is_global_scope() ||
-          !(scope_inside_with_ || outer_scope_is_eval_scope_ ||
-            scope_calls_eval_ || outer_scope_calls_eval_)) {
-        // We must have a global variable.
-        ASSERT(global_scope != NULL);
-        var = global_scope->DeclareGlobal(proxy->name());
-
-      } else if (scope_inside_with_) {
-        // If we are inside a with statement we give up and look up
-        // the variable at runtime.
-        var = NonLocal(proxy->name(), Variable::DYNAMIC);
-
-      } else if (invalidated_local != NULL) {
-        // No with statements are involved and we found a local
-        // variable that might be shadowed by eval introduced
-        // variables.
-        var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
-        var->set_local_if_not_shadowed(invalidated_local);
-
-      } else if (outer_scope_is_eval_scope_) {
-        // No with statements and we did not find a local and the code
-        // is executed with a call to eval.  The context contains
-        // scope information that we can use to determine if the
-        // variable is global if it is not shadowed by eval-introduced
-        // variables.
-        if (context->GlobalIfNotShadowedByEval(proxy->name())) {
-          var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
-
-        } else {
-          var = NonLocal(proxy->name(), Variable::DYNAMIC);
-        }
-
+    case BOUND_EVAL_SHADOWED:
+      // We found a variable binding that might be shadowed
+      // by 'eval' introduced variable bindings.
+      if (var->is_global()) {
+        var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
       } else {
-        // No with statements and we did not find a local and the code
-        // is not executed with a call to eval.  We know that this
-        // variable is global unless it is shadowed by eval-introduced
-        // variables.
-        var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
+        Variable* invalidated = var;
+        var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
+        var->set_local_if_not_shadowed(invalidated);
       }
-    }
+      break;
+
+    case UNBOUND:
+      // No binding has been found. Declare a variable in global scope.
+      ASSERT(global_scope != NULL);
+      var = global_scope->DeclareGlobal(proxy->name());
+      break;
+
+    case UNBOUND_EVAL_SHADOWED:
+      // No binding has been found. But some scope makes a
+      // non-strict 'eval' call.
+      var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+      break;
+
+    case DYNAMIC_LOOKUP:
+      // The variable could not be resolved statically.
+      var = NonLocal(proxy->name(), DYNAMIC);
+      break;
   }
 
+  ASSERT(var != NULL);
   proxy->BindTo(var);
 }
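
Aside: continuing the sketch above, the resolver then maps each binding kind
to a concrete strategy, mirroring the switch in Scope::ResolveVariable
(DescribeResolution is a hypothetical helper, not V8 code):

// Illustrative sketch only; MiniBindingKind is defined in the sketch above.
const char* DescribeResolution(MiniBindingKind kind, bool found_global) {
  switch (kind) {
    case MB_BOUND:
      return "bind to the statically resolved variable";
    case MB_BOUND_EVAL_SHADOWED:
      // The found binding is only a guess: use a dynamic slot that
      // remembers the shadowed local, or go straight to the global
      // object when the guess was a global variable.
      return found_global ? "DYNAMIC_GLOBAL" : "DYNAMIC_LOCAL";
    case MB_UNBOUND:
      return "declare an implicit global in the global scope";
    case MB_UNBOUND_EVAL_SHADOWED:
      return "DYNAMIC_GLOBAL";
    case MB_DYNAMIC_LOOKUP:
      return "DYNAMIC (full runtime lookup)";
  }
  return NULL;  // unreachable
}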
 
 
-void Scope::ResolveVariablesRecursively(Scope* global_scope,
-                                        Handle<Context> context) {
+void Scope::ResolveVariablesRecursively(Scope* global_scope) {
   ASSERT(global_scope == NULL || global_scope->is_global_scope());
 
   // Resolve unresolved variables for this scope.
   for (int i = 0; i < unresolved_.length(); i++) {
-    ResolveVariable(global_scope, context, unresolved_[i]);
+    ResolveVariable(global_scope, unresolved_[i]);
   }
 
   // Resolve unresolved variables for inner scopes.
   for (int i = 0; i < inner_scopes_.length(); i++) {
-    inner_scopes_[i]->ResolveVariablesRecursively(global_scope, context);
+    inner_scopes_[i]->ResolveVariablesRecursively(global_scope);
   }
 }
 
 
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
-                               bool outer_scope_calls_non_strict_eval,
-                               bool outer_scope_is_eval_scope) {
-  if (outer_scope_calls_eval) {
-    outer_scope_calls_eval_ = true;
-  }
-
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval) {
   if (outer_scope_calls_non_strict_eval) {
     outer_scope_calls_non_strict_eval_ = true;
   }
 
-  if (outer_scope_is_eval_scope) {
-    outer_scope_is_eval_scope_ = true;
-  }
-
-  bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
-  bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
   bool calls_non_strict_eval =
-      (scope_calls_eval_ && !is_strict_mode()) ||
-      outer_scope_calls_non_strict_eval_;
+      this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
   for (int i = 0; i < inner_scopes_.length(); i++) {
     Scope* inner_scope = inner_scopes_[i];
-    if (inner_scope->PropagateScopeInfo(calls_eval,
-                                        calls_non_strict_eval,
-                                        is_eval)) {
+    if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
       inner_scope_calls_eval_ = true;
     }
     if (inner_scope->force_eager_compilation_) {
@@ -1019,7 +1001,7 @@
   // via an eval() call.  This is only possible if the variable has a
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
-      (var->is_accessed_from_inner_scope() ||
+      (var->has_forced_context_allocation() ||
        scope_calls_eval_ ||
        inner_scope_calls_eval_ ||
        scope_contains_with_ ||
@@ -1040,9 +1022,9 @@
   //
   // Exceptions: temporary variables are never allocated in a context;
   // catch-bound variables are always allocated in a context.
-  if (var->mode() == Variable::TEMPORARY) return false;
+  if (var->mode() == TEMPORARY) return false;
   if (is_catch_scope() || is_block_scope()) return true;
-  return var->is_accessed_from_inner_scope() ||
+  return var->has_forced_context_allocation() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_ ||
       scope_contains_with_ ||
@@ -1095,7 +1077,7 @@
     // In strict mode 'arguments' does not alias formal parameters.
     // Therefore in strict mode we allocate parameters as if 'arguments'
     // were not used.
-    uses_nonstrict_arguments = !is_strict_mode();
+    uses_nonstrict_arguments = is_classic_mode();
   }
 
   // The same parameter may occur multiple times in the parameters_ list.
@@ -1106,9 +1088,8 @@
     Variable* var = params_[i];
     ASSERT(var->scope() == this);
     if (uses_nonstrict_arguments) {
-      // Give the parameter a use from an inner scope, to force allocation
-      // to the context.
-      var->MarkAsAccessedFromInnerScope();
+      // Force context allocation of the parameter.
+      var->ForceContextAllocation();
     }
 
     if (MustAllocate(var)) {
@@ -1183,21 +1164,15 @@
   if (is_function_scope()) AllocateParameterLocals();
   AllocateNonParameterLocals();
 
-  // Allocate context if necessary.
-  bool must_have_local_context = false;
-  if (scope_calls_eval_ || scope_contains_with_) {
-    // The context for the eval() call or 'with' statement in this scope.
-    // Unless we are in the global or an eval scope, we need a local
-    // context even if we didn't statically allocate any locals in it,
-    // and the compiler will access the context variable. If we are
-    // not in an inner scope, the scope is provided from the outside.
-    must_have_local_context = is_function_scope();
-  }
+  // Force allocation of a context for this scope if necessary. For a 'with'
+  // scope and for a function scope that makes an 'eval' call we need a context,
+  // even if no local variables were statically allocated in the scope.
+  bool must_have_context = is_with_scope() ||
+      (is_function_scope() && calls_eval());
 
   // If we didn't allocate any locals in the local context, then we only
-  // need the minimal number of slots if we must have a local context.
-  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
-      !must_have_local_context) {
+  // need the minimal number of slots if we must have a context.
+  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
     num_heap_slots_ = 0;
   }
 
@@ -1205,4 +1180,17 @@
   ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
+
+int Scope::StackLocalCount() const {
+  return num_stack_slots() -
+      (function_ != NULL && function_->var()->IsStackLocal() ? 1 : 0);
+}
+
+
+int Scope::ContextLocalCount() const {
+  if (num_heap_slots() == 0) return 0;
+  return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+      (function_ != NULL && function_->var()->IsContextSlot() ? 1 : 0);
+}
+
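
Aside: a small worked example of the slot accounting above. Assuming
Context::MIN_CONTEXT_SLOTS == 4 (the fixed context header), a function scope
with two context-allocated locals plus a context-allocated function name has
num_heap_slots() == 4 + 2 + 1 == 7, so ContextLocalCount() == 7 - 4 - 1 == 2;
the function name slot is accounted for separately.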
 } }  // namespace v8::internal
diff --git a/src/scopes.h b/src/scopes.h
index 2917a63..523a251 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -42,17 +42,14 @@
  public:
   VariableMap();
 
-  // Dummy constructor.  This constructor doesn't set up the map
-  // properly so don't use it unless you have a good reason.
-  explicit VariableMap(bool gotta_love_static_overloading);
-
   virtual ~VariableMap();
 
   Variable* Declare(Scope* scope,
                     Handle<String> name,
-                    Variable::Mode mode,
+                    VariableMode mode,
                     bool is_valid_lhs,
-                    Variable::Kind kind);
+                    Variable::Kind kind,
+                    InitializationFlag initialization_flag);
 
   Variable* Lookup(Handle<String> name);
 };
@@ -64,8 +61,8 @@
 // and setup time for scopes that don't need them.
 class DynamicScopePart : public ZoneObject {
  public:
-  VariableMap* GetMap(Variable::Mode mode) {
-    int index = mode - Variable::DYNAMIC;
+  VariableMap* GetMap(VariableMode mode) {
+    int index = mode - DYNAMIC;
     ASSERT(index >= 0 && index < 3);
     return &maps_[index];
   }
@@ -89,28 +86,19 @@
   // ---------------------------------------------------------------------------
   // Construction
 
-  enum Type {
-    EVAL_SCOPE,      // The top-level scope for an eval source.
-    FUNCTION_SCOPE,  // The top-level scope for a function.
-    GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
-    CATCH_SCOPE,     // The scope introduced by catch.
-    BLOCK_SCOPE      // The scope introduced by a new block.
-  };
-
-  Scope(Scope* outer_scope, Type type);
+  Scope(Scope* outer_scope, ScopeType type);
 
   // Compute top scope and allocate variables. For lazy compilation the top
   // scope only contains the single lazily compiled function, so this
   // doesn't re-allocate variables repeatedly.
   static bool Analyze(CompilationInfo* info);
 
-  static Scope* DeserializeScopeChain(CompilationInfo* info,
-                                      Scope* innermost_scope);
+  static Scope* DeserializeScopeChain(Context* context, Scope* global_scope);
 
   // The scope name is only used for printing/debugging.
   void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
 
-  void Initialize(bool inside_with);
+  void Initialize();
 
   // Checks if the block scope is redundant, i.e. it does not contain any
   // block scoped declarations. In that case it is removed from the scope
@@ -123,6 +111,12 @@
   // Lookup a variable in this scope. Returns the variable or NULL if not found.
   Variable* LocalLookup(Handle<String> name);
 
+  // This lookup corresponds to a lookup in the "intermediate" scope sitting
+  // between this scope and the outer scope. (ECMA-262, 3rd ed., requires that
+  // the name of a named function literal is kept in an intermediate scope
+  // in between this scope and the next outer scope.)
+  Variable* LookupFunctionVar(Handle<String> name);
+
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
   Variable* Lookup(Handle<String> name);
@@ -130,16 +124,18 @@
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the
   // outer scope. Only possible for function scopes; at most one variable.
-  Variable* DeclareFunctionVar(Handle<String> name);
+  Variable* DeclareFunctionVar(Handle<String> name, VariableMode mode);
 
   // Declare a parameter in this scope.  When there are duplicated
   // parameters the rightmost one 'wins'.  However, the implementation
   // expects all parameters to be declared and from left to right.
-  void DeclareParameter(Handle<String> name, Variable::Mode mode);
+  void DeclareParameter(Handle<String> name, VariableMode mode);
 
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
+  Variable* DeclareLocal(Handle<String> name,
+                         VariableMode mode,
+                         InitializationFlag init_flag);
 
   // Declare an implicit global variable in this scope which must be a
   // global scope.  The variable was introduced (possibly from an inner
@@ -149,7 +145,6 @@
 
   // Create a new unresolved variable.
   VariableProxy* NewUnresolved(Handle<String> name,
-                               bool inside_with,
                                int position = RelocInfo::kNoPosition);
 
   // Remove an unresolved variable. During parsing, an unresolved variable
@@ -199,11 +194,42 @@
   void RecordWithStatement() { scope_contains_with_ = true; }
 
   // Inform the scope that the corresponding code contains an eval call.
-  void RecordEvalCall() { scope_calls_eval_ = true; }
+  void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
 
-  // Enable strict mode for the scope (unless disabled by a global flag).
-  void EnableStrictMode() {
-    strict_mode_ = FLAG_strict_mode;
+  // Set the language mode of this scope.
+  void SetLanguageMode(LanguageMode language_mode) {
+    language_mode_ = language_mode;
+  }
+
+  // Position in the source where this scope begins and ends.
+  //
+  // * For the scope of a with statement
+  //     with (obj) stmt
+  //   start position: start position of first token of 'stmt'
+  //   end position: end position of last token of 'stmt'
+  // * For the scope of a block
+  //     { stmts }
+  //   start position: start position of '{'
+  //   end position: end position of '}'
+  // * For the scope of a function literal or declaration
+  //     function fun(a,b) { stmts }
+  //   start position: start position of '('
+  //   end position: end position of '}'
+  // * For the scope of a catch block
+  //     try { stmts } catch(e) { stmts }
+  //   start position: start position of '('
+  //   end position: end position of ')'
+  // * For the scope of a for-statement
+  //     for (let x ...) stmt
+  //   start position: start position of '('
+  //   end position: end position of last token of 'stmt'
+  int start_position() const { return start_position_; }
+  void set_start_position(int statement_pos) {
+    start_position_ = statement_pos;
+  }
+  int end_position() const { return end_position_; }
+  void set_end_position(int statement_pos) {
+    end_position_ = statement_pos;
   }
 
   // ---------------------------------------------------------------------------
@@ -215,14 +241,25 @@
   bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
   bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
   bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
-  bool is_strict_mode() const { return strict_mode_; }
-  bool is_strict_mode_eval_scope() const {
-    return is_eval_scope() && is_strict_mode();
+  bool is_with_scope() const { return type_ == WITH_SCOPE; }
+  bool is_declaration_scope() const {
+    return is_eval_scope() || is_function_scope() || is_global_scope();
+  }
+  bool is_classic_mode() const {
+    return language_mode() == CLASSIC_MODE;
+  }
+  bool is_extended_mode() const {
+    return language_mode() == EXTENDED_MODE;
+  }
+  bool is_strict_or_extended_eval_scope() const {
+    return is_eval_scope() && !is_classic_mode();
   }
 
   // Information about which scopes call 'eval'.
   bool calls_eval() const { return scope_calls_eval_; }
-  bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+  bool calls_non_strict_eval() {
+    return scope_calls_eval_ && is_classic_mode();
+  }
   bool outer_scope_calls_non_strict_eval() const {
     return outer_scope_calls_non_strict_eval_;
   }
@@ -238,6 +275,12 @@
   // ---------------------------------------------------------------------------
   // Accessors.
 
+  // The type of this scope.
+  ScopeType type() const { return type_; }
+
+  // The language mode of this scope.
+  LanguageMode language_mode() const { return language_mode_; }
+
   // The variable corresponding the 'this' value.
   Variable* receiver() { return receiver_; }
 
@@ -264,13 +307,17 @@
   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }
 
+  // Inner scope list.
+  ZoneList<Scope*>* inner_scopes() { return &inner_scopes_; }
 
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
-  // Collect all used locals in this scope.
-  template<class Allocator>
-  void CollectUsedVariables(List<Variable*, Allocator>* locals);
+  // Collect stack and context allocated local variables in this scope. Note
+  // that the function variable - if present - is not collected and should be
+  // handled separately.
+  void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                    ZoneList<Variable*>* context_locals);
 
   // Resolve and fill in the allocation information for all variables
   // in this scope. Must be called *after* all scopes have been
@@ -280,7 +327,7 @@
   // The global_scope parameter, if non-NULL, must be the global scope; it is
   // used to declare implicit globals for variable references that could not
   // be statically resolved.
-  void AllocateVariables(Handle<Context> context);
+  void AllocateVariables(Scope* global_scope);
 
   // Current number of var or const locals.
   int num_var_or_const() { return num_var_or_const_; }
@@ -289,6 +336,9 @@
   int num_stack_slots() const { return num_stack_slots_; }
   int num_heap_slots() const { return num_heap_slots_; }
 
+  int StackLocalCount() const;
+  int ContextLocalCount() const;
+
   // Make sure this scope and all outer scopes are eagerly compiled.
   void ForceEagerCompilation()  { force_eager_compilation_ = true; }
 
@@ -305,7 +355,14 @@
   // where var declarations will be hoisted to in the implementation.
   Scope* DeclarationScope();
 
-  Handle<SerializedScopeInfo> GetSerializedScopeInfo();
+  Handle<ScopeInfo> GetScopeInfo();
+
+  // Get the chain of nested scopes within this scope for the source statement
+  // position. The scopes will be added to the list from the outermost scope to
+  // the innermost scope. Only nested block, catch or with scopes are tracked
+  // and will be returned, but no inner function scopes.
+  void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
+                           int statement_position);
 
   // ---------------------------------------------------------------------------
   // Strict mode support.
@@ -330,8 +387,6 @@
  protected:
   friend class ParserFactory;
 
-  explicit Scope(Type type);
-
   Isolate* const isolate_;
 
   // Scope tree.
@@ -339,7 +394,7 @@
   ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
 
   // The scope type.
-  Type type_;
+  ScopeType type_;
 
   // Debugging support.
   Handle<String> scope_name_;
@@ -379,14 +434,15 @@
   // This scope or a nested catch scope or with scope contains an 'eval' call. At
   // the 'eval' call site this scope is the declaration scope.
   bool scope_calls_eval_;
-  // This scope is a strict mode scope.
-  bool strict_mode_;
+  // The language mode of this scope.
+  LanguageMode language_mode_;
+  // Source positions.
+  int start_position_;
+  int end_position_;
 
   // Computed via PropagateScopeInfo.
-  bool outer_scope_calls_eval_;
   bool outer_scope_calls_non_strict_eval_;
   bool inner_scope_calls_eval_;
-  bool outer_scope_is_eval_scope_;
   bool force_eager_compilation_;
 
   // True if it doesn't need scope resolution (e.g., if the scope was
@@ -396,32 +452,75 @@
   // Computed as variables are declared.
   int num_var_or_const_;
 
-  // Computed via AllocateVariables; function scopes only.
+  // Computed via AllocateVariables; function, block and catch scopes only.
   int num_stack_slots_;
   int num_heap_slots_;
 
-  // Serialized scopes support.
-  Handle<SerializedScopeInfo> scope_info_;
+  // Serialized scope info support.
+  Handle<ScopeInfo> scope_info_;
   bool already_resolved() { return already_resolved_; }
 
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
-  Variable* NonLocal(Handle<String> name, Variable::Mode mode);
+  Variable* NonLocal(Handle<String> name, VariableMode mode);
 
   // Variable resolution.
+  // Possible results of a recursive variable lookup telling if and how a
+  // variable is bound. These are returned in the output parameter *binding_kind
+  // of the LookupRecursive function.
+  enum BindingKind {
+    // The variable reference could be statically resolved to a variable binding
+    // which is returned. There is no 'with' statement between the reference and
+    // the binding and no scope between the reference scope (inclusive) and
+    // binding scope (exclusive) makes a non-strict 'eval' call.
+    BOUND,
+
+    // The variable reference could be statically resolved to a variable binding
+    // which is returned. There is no 'with' statement between the reference and
+    // the binding, but some scope between the reference scope (inclusive) and
+    // binding scope (exclusive) makes a non-strict 'eval' call that might
+    // introduce variable bindings shadowing the found one. Thus the found
+    // variable binding is just a guess.
+    BOUND_EVAL_SHADOWED,
+
+    // The variable reference could not be statically resolved to any binding
+    // and thus should be considered referencing a global variable. NULL is
+    // returned. The variable reference is not inside any 'with' statement and
+    // no scope between the reference scope (inclusive) and global scope
+    // (exclusive) makes a non-strict 'eval' call.
+    UNBOUND,
+
+    // The variable reference could not be statically resolved to any binding.
+    // NULL is returned. The variable reference is not inside any 'with'
+    // statement, but some scope between the reference scope (inclusive) and
+    // global scope (exclusive) makes a non-strict 'eval' call that might
+    // introduce a variable binding. Thus the reference should be considered
+    // referencing a global variable unless it is shadowed by an
+    // 'eval'-introduced binding.
+    UNBOUND_EVAL_SHADOWED,
+
+    // The variable could not be statically resolved and needs to be looked up
+    // dynamically. NULL is returned. There are two possible reasons:
+    // * A 'with' statement has been encountered and there is no variable
+    //   binding for the name between the variable reference and the 'with'.
+    //   The variable potentially references a property of the 'with' object.
+    // * The code is being executed as part of a call to 'eval' and the calling
+    //   context chain contains either a variable binding for the name or it
+    //   contains a 'with' context.
+    DYNAMIC_LOOKUP
+  };
+
+  // Lookup a variable reference given by name recursively starting with this
+  // scope. The kind of binding found (if any) is reported through the output
+  // parameter *binding_kind; see the BindingKind comments above.
   Variable* LookupRecursive(Handle<String> name,
-                            bool from_inner_function,
-                            Variable** invalidated_local);
+                            BindingKind* binding_kind);
   void ResolveVariable(Scope* global_scope,
-                       Handle<Context> context,
                        VariableProxy* proxy);
-  void ResolveVariablesRecursively(Scope* global_scope,
-                                   Handle<Context> context);
+  void ResolveVariablesRecursively(Scope* global_scope);
 
   // Scope analysis.
-  bool PropagateScopeInfo(bool outer_scope_calls_eval,
-                          bool outer_scope_calls_non_strict_eval,
-                          bool outer_scope_is_eval_scope);
+  bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
   bool HasTrivialContext() const;
 
   // Predicates.
@@ -438,8 +537,8 @@
   void AllocateVariablesRecursively();
 
  private:
-  // Construct a function or block scope based on the scope info.
-  Scope(Scope* inner_scope, Type type, Handle<SerializedScopeInfo> scope_info);
+  // Construct a scope based on the scope info.
+  Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
 
   // Construct a catch scope with a binding for the name.
   Scope(Scope* inner_scope, Handle<String> catch_variable_name);
@@ -451,9 +550,9 @@
     }
   }
 
-  void SetDefaults(Type type,
+  void SetDefaults(ScopeType type,
                    Scope* outer_scope,
-                   Handle<SerializedScopeInfo> scope_info);
+                   Handle<ScopeInfo> scope_info);
 };
 
 } }  // namespace v8::internal
diff --git a/src/serialize.cc b/src/serialize.cc
index ecb480a..5830c64 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -300,16 +300,28 @@
       RUNTIME_ENTRY,
       4,
       "HandleScope::DeleteExtensions");
+  Add(ExternalReference::
+          incremental_marking_record_write_function(isolate).address(),
+      RUNTIME_ENTRY,
+      5,
+      "IncrementalMarking::RecordWrite");
+  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+      RUNTIME_ENTRY,
+      6,
+      "StoreBuffer::StoreBufferOverflow");
+  Add(ExternalReference::
+          incremental_evacuation_record_write_function(isolate).address(),
+      RUNTIME_ENTRY,
+      7,
+      "IncrementalMarking::RecordWrite");
 
   // Miscellaneous
-  Add(ExternalReference::the_hole_value_location(isolate).address(),
-      UNCLASSIFIED,
-      2,
-      "Factory::the_hole_value().location()");
-  Add(ExternalReference::roots_address(isolate).address(),
+  Add(ExternalReference::roots_array_start(isolate).address(),
       UNCLASSIFIED,
       3,
-      "Heap::roots_address()");
+      "Heap::roots_array_start()");
   Add(ExternalReference::address_of_stack_limit(isolate).address(),
       UNCLASSIFIED,
       4,
@@ -351,129 +363,137 @@
       "Heap::always_allocate_scope_depth()");
   Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
       UNCLASSIFIED,
-      13,
+      14,
       "Heap::NewSpaceAllocationLimitAddress()");
   Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
       UNCLASSIFIED,
-      14,
+      15,
       "Heap::NewSpaceAllocationTopAddress()");
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Add(ExternalReference::debug_break(isolate).address(),
       UNCLASSIFIED,
-      15,
+      16,
       "Debug::Break()");
   Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
       UNCLASSIFIED,
-      16,
+      17,
       "Debug::step_in_fp_addr()");
 #endif
   Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
       UNCLASSIFIED,
-      17,
+      18,
       "add_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
       UNCLASSIFIED,
-      18,
+      19,
       "sub_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
       UNCLASSIFIED,
-      19,
+      20,
       "mul_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
       UNCLASSIFIED,
-      20,
+      21,
       "div_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
       UNCLASSIFIED,
-      21,
+      22,
       "mod_two_doubles");
   Add(ExternalReference::compare_doubles(isolate).address(),
       UNCLASSIFIED,
-      22,
+      23,
       "compare_doubles");
 #ifndef V8_INTERPRETED_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
       UNCLASSIFIED,
-      23,
+      24,
       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
   Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
       UNCLASSIFIED,
-      24,
+      25,
       "RegExpMacroAssembler*::CheckStackGuardState()");
   Add(ExternalReference::re_grow_stack(isolate).address(),
       UNCLASSIFIED,
-      25,
+      26,
       "NativeRegExpMacroAssembler::GrowStack()");
   Add(ExternalReference::re_word_character_map().address(),
       UNCLASSIFIED,
-      26,
+      27,
       "NativeRegExpMacroAssembler::word_character_map");
 #endif  // V8_INTERPRETED_REGEXP
   // Keyed lookup cache.
   Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
       UNCLASSIFIED,
-      27,
+      28,
       "KeyedLookupCache::keys()");
   Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
       UNCLASSIFIED,
-      28,
+      29,
       "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
       UNCLASSIFIED,
-      29,
+      30,
       "TranscendentalCache::caches()");
   Add(ExternalReference::handle_scope_next_address().address(),
       UNCLASSIFIED,
-      30,
+      31,
       "HandleScope::next");
   Add(ExternalReference::handle_scope_limit_address().address(),
       UNCLASSIFIED,
-      31,
+      32,
       "HandleScope::limit");
   Add(ExternalReference::handle_scope_level_address().address(),
       UNCLASSIFIED,
-      32,
+      33,
       "HandleScope::level");
   Add(ExternalReference::new_deoptimizer_function(isolate).address(),
       UNCLASSIFIED,
-      33,
+      34,
       "Deoptimizer::New()");
   Add(ExternalReference::compute_output_frames_function(isolate).address(),
       UNCLASSIFIED,
-      34,
+      35,
       "Deoptimizer::ComputeOutputFrames()");
   Add(ExternalReference::address_of_min_int().address(),
       UNCLASSIFIED,
-      35,
+      36,
       "LDoubleConstant::min_int");
   Add(ExternalReference::address_of_one_half().address(),
       UNCLASSIFIED,
-      36,
+      37,
       "LDoubleConstant::one_half");
   Add(ExternalReference::isolate_address().address(),
       UNCLASSIFIED,
-      37,
+      38,
       "isolate");
   Add(ExternalReference::address_of_minus_zero().address(),
       UNCLASSIFIED,
-      38,
+      39,
       "LDoubleConstant::minus_zero");
   Add(ExternalReference::address_of_negative_infinity().address(),
       UNCLASSIFIED,
-      39,
+      40,
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
       UNCLASSIFIED,
-      40,
+      41,
       "power_double_double_function");
   Add(ExternalReference::power_double_int_function(isolate).address(),
       UNCLASSIFIED,
-      41,
-      "power_double_int_function");
-  Add(ExternalReference::arguments_marker_location(isolate).address(),
-      UNCLASSIFIED,
       42,
-      "Factory::arguments_marker().location()");
+      "power_double_int_function");
+  Add(ExternalReference::store_buffer_top(isolate).address(),
+      UNCLASSIFIED,
+      43,
+      "store_buffer_top");
+  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
+      UNCLASSIFIED,
+      44,
+      "canonical_nan");
+  Add(ExternalReference::address_of_the_hole_nan().address(),
+      UNCLASSIFIED,
+      45,
+      "the_hole_nan");
 }
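
Aside: a minimal standalone sketch of why the renumbering above matters.
External references are written into the snapshot as the small integer codes
registered here and mapped back to this process's addresses on
deserialization, so the serializer and deserializer must agree on the
numbering; inserting an entry (store_buffer_top, the NaN constants) shifts
every later code. Simplified stand-in for ExternalReferenceEncoder/Decoder:

#include <map>
#include <vector>

// Illustrative sketch only: a single flat code space instead of V8's
// per-type encoding.
typedef unsigned char* Address;

class MiniReferenceTable {
 public:
  void Add(Address address, int code) {
    encode_[address] = code;
    if (code >= static_cast<int>(decode_.size())) decode_.resize(code + 1);
    decode_[code] = address;
  }
  // Serializer side: raw external address -> stable code in the snapshot.
  int Encode(Address address) const {
    std::map<Address, int>::const_iterator it = encode_.find(address);
    return (it == encode_.end()) ? -1 : it->second;
  }
  // Deserializer side: code from the snapshot -> this process's address.
  Address Decode(int code) const { return decode_[code]; }

 private:
  std::map<Address, int> encode_;
  std::vector<Address> decode_;
};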
 
 
@@ -569,6 +589,7 @@
       maybe_new_allocation =
           reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
     }
+    ASSERT(!maybe_new_allocation->IsFailure());
     Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
     HeapObject* new_object = HeapObject::cast(new_allocation);
     address = new_object->address();
@@ -577,14 +598,13 @@
     ASSERT(SpaceIsLarge(space_index));
     LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
     Object* new_allocation;
-    if (space_index == kLargeData) {
-      new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
-    } else if (space_index == kLargeFixedArray) {
+    if (space_index == kLargeData || space_index == kLargeFixedArray) {
       new_allocation =
-          lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
+          lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
     } else {
       ASSERT_EQ(kLargeCode, space_index);
-      new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
+      new_allocation =
+          lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
     }
     HeapObject* new_object = HeapObject::cast(new_allocation);
     // Record all large objects in the same space.
@@ -629,6 +649,7 @@
 
 void Deserializer::Deserialize() {
   isolate_ = Isolate::Current();
+  ASSERT(isolate_ != NULL);
   // Don't GC while deserializing - just expand the heap.
   AlwaysAllocateScope always_allocate;
   // Don't use the free lists while deserializing.
@@ -648,6 +669,14 @@
 
   isolate_->heap()->set_global_contexts_list(
       isolate_->heap()->undefined_value());
+
+  // Update data pointers to the external strings containing natives sources.
+  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+    Object* source = isolate_->heap()->natives_source_cache()->get(i);
+    if (!source->IsUndefined()) {
+      ExternalAsciiString::cast(source)->update_data_cache();
+    }
+  }
 }
 
 
@@ -685,9 +714,8 @@
 // This routine writes the new object into the pointer provided.
 // The reason for this strange interface is that otherwise the object is
-// written very late, which means the ByteArray map is not set up by the
-// time we need to use it to mark the space at the end of a page free (by
-// making it into a byte array).
+// written very late, which means the FreeSpace map is not set up by the
+// time we need to use it to mark the space at the end of a page free.
 void Deserializer::ReadObject(int space_number,
                               Space* space,
                               Object** write_back) {
@@ -737,8 +765,13 @@
 void Deserializer::ReadChunk(Object** current,
                              Object** limit,
                              int source_space,
-                             Address address) {
+                             Address current_object_address) {
   Isolate* const isolate = isolate_;
+  bool write_barrier_needed = (current_object_address != NULL &&
+                               source_space != NEW_SPACE &&
+                               source_space != CELL_SPACE &&
+                               source_space != CODE_SPACE &&
+                               source_space != OLD_DATA_SPACE);
   while (current < limit) {
     int data = source_->Get();
     switch (data) {
@@ -758,8 +791,7 @@
         if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
           ASSIGN_DEST_SPACE(space_number)                                      \
           ReadObject(space_number, dest_space, current);                       \
-          emit_write_barrier =                                                 \
-            (space_number == NEW_SPACE && source_space != NEW_SPACE);          \
+          emit_write_barrier = (space_number == NEW_SPACE);                    \
         } else {                                                               \
           Object* new_object = NULL;  /* May not be a real Object pointer. */  \
           if (where == kNewObject) {                                           \
@@ -767,25 +799,25 @@
             ReadObject(space_number, dest_space, &new_object);                 \
           } else if (where == kRootArray) {                                    \
             int root_id = source_->GetInt();                                   \
-            new_object = isolate->heap()->roots_address()[root_id];            \
+            new_object = isolate->heap()->roots_array_start()[root_id];        \
+            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
           } else if (where == kPartialSnapshotCache) {                         \
             int cache_index = source_->GetInt();                               \
             new_object = isolate->serialize_partial_snapshot_cache()           \
                 [cache_index];                                                 \
+            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
           } else if (where == kExternalReference) {                            \
             int reference_id = source_->GetInt();                              \
             Address address = external_reference_decoder_->                    \
                 Decode(reference_id);                                          \
             new_object = reinterpret_cast<Object*>(address);                   \
           } else if (where == kBackref) {                                      \
-            emit_write_barrier =                                               \
-              (space_number == NEW_SPACE && source_space != NEW_SPACE);        \
+            emit_write_barrier = (space_number == NEW_SPACE);                  \
             new_object = GetAddressFromEnd(data & kSpaceMask);                 \
           } else {                                                             \
             ASSERT(where == kFromStart);                                       \
             if (offset_from_start == kUnknownOffsetFromStart) {                \
-              emit_write_barrier =                                             \
-                (space_number == NEW_SPACE && source_space != NEW_SPACE);      \
+              emit_write_barrier = (space_number == NEW_SPACE);                \
               new_object = GetAddressFromStart(data & kSpaceMask);             \
             } else {                                                           \
               Address object_address = pages_[space_number][0] +               \
@@ -812,12 +844,14 @@
             *current = new_object;                                             \
           }                                                                    \
         }                                                                      \
-        if (emit_write_barrier) {                                              \
-          isolate->heap()->RecordWrite(address, static_cast<int>(              \
-              reinterpret_cast<Address>(current) - address));                  \
+        if (emit_write_barrier && write_barrier_needed) {                      \
+          Address current_address = reinterpret_cast<Address>(current);        \
+          isolate->heap()->RecordWrite(                                        \
+              current_object_address,                                          \
+              static_cast<int>(current_address - current_object_address));     \
         }                                                                      \
         if (!current_was_incremented) {                                        \
-          current++;   /* Increment current if it wasn't done above. */        \
+          current++;                                                           \
         }                                                                      \
         break;                                                                 \
       }                                                                        \
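
Aside: a minimal standalone sketch of the write barrier filtering in
ReadChunk above. Whether a barrier can ever be needed is decided once per
destination object (write_barrier_needed, from the object's space); each slot
then only has to check whether the value written points into new space.
Simplified stand-in types:

// Illustrative sketch only: simplified stand-ins for v8::internal types.
struct MiniHeap {
  char* new_space_start;
  char* new_space_end;
  bool InNewSpace(void* object) {
    char* p = reinterpret_cast<char*>(object);
    return new_space_start <= p && p < new_space_end;
  }
  void RecordWrite(char* object_address, int offset) {
    // Record the old-to-new pointer in the store buffer (elided here).
    (void)object_address;
    (void)offset;
  }
};

// Writes 'value' into 'slot' of the object at 'object_address', recording
// the slot only when it may hold an old-to-new pointer.
void StoreWithBarrier(MiniHeap* heap, char* object_address, void** slot,
                      void* value, bool write_barrier_needed) {
  *slot = value;
  if (write_barrier_needed && heap->InNewSpace(value)) {
    heap->RecordWrite(
        object_address,
        static_cast<int>(reinterpret_cast<char*>(slot) - object_address));
  }
}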
@@ -864,11 +898,17 @@
   CASE_STATEMENT(where, how, within, kLargeCode)                               \
   CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
 
-#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number,                    \
-                                       space_number,                           \
-                                       offset_from_start)                      \
-  CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number)      \
-  CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
+#define FOUR_CASES(byte_code)             \
+  case byte_code:                         \
+  case byte_code + 1:                     \
+  case byte_code + 2:                     \
+  case byte_code + 3:
+
+#define SIXTEEN_CASES(byte_code)          \
+  FOUR_CASES(byte_code)                   \
+  FOUR_CASES(byte_code + 4)               \
+  FOUR_CASES(byte_code + 8)               \
+  FOUR_CASES(byte_code + 12)
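
Expanded, SIXTEEN_CASES is just sixteen consecutive case labels. With
kRootArrayLowConstants == 0xb0 (see serialize.h below), its use just below
covers:

    case 0xb0: case 0xb1: case 0xb2: case 0xb3:
    case 0xb4: case 0xb5: case 0xb6: case 0xb7:
    case 0xb8: case 0xb9: case 0xba: case 0xbb:
    case 0xbc: case 0xbd: case 0xbe: case 0xbf: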
 
       // We generate 15 cases and bodies that process special tags that combine
       // the raw data tag and the length into one byte.
@@ -892,6 +932,38 @@
         break;
       }
 
+      SIXTEEN_CASES(kRootArrayLowConstants)
+      SIXTEEN_CASES(kRootArrayHighConstants) {
+        int root_id = RootArrayConstantFromByteCode(data);
+        Object* object = isolate->heap()->roots_array_start()[root_id];
+        ASSERT(!isolate->heap()->InNewSpace(object));
+        *current++ = object;
+        break;
+      }
+
+      case kRepeat: {
+        int repeats = source_->GetInt();
+        Object* object = current[-1];
+        ASSERT(!isolate->heap()->InNewSpace(object));
+        for (int i = 0; i < repeats; i++) current[i] = object;
+        current += repeats;
+        break;
+      }
+
+      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
+                    Heap::kOldSpaceRoots);
+      STATIC_ASSERT(kMaxRepeats == 12);
+      FOUR_CASES(kConstantRepeat)
+      FOUR_CASES(kConstantRepeat + 4)
+      FOUR_CASES(kConstantRepeat + 8) {
+        int repeats = RepeatsForCode(data);
+        Object* object = current[-1];
+        ASSERT(!isolate->heap()->InNewSpace(object));
+        for (int i = 0; i < repeats; i++) current[i] = object;
+        current += repeats;
+        break;
+      }
+
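A worked decode of the constant-repeat byte codes, using the values defined in
serialize.h below:

    // byte 0x74 -> RepeatsForCode(0x74) = 0x74 - 0x73 = 1: copy the previous
    //              word once more.
    // byte 0x76 -> RepeatsForCode(0x76) = 3:
    //   before: ... | A |              (current points just past A)
    //   after:  ... | A | A | A | A |  (current advanced by 3)
    // byte 0x73 (kRepeat) is instead followed by an explicit count, for runs
    // longer than kMaxRepeats (12).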
       // Deserialize a new object and write a pointer to it to the current
       // object.
       ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
@@ -917,9 +989,6 @@
       // start and write a pointer to its first instruction to the current code
       // object.
       ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
-      // Find an already deserialized object at one of the predetermined popular
-      // offsets from the start and write a pointer to it in the current object.
-      COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
       // Find an object in the roots array and write a pointer to it to the
       // current object.
       CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
@@ -961,7 +1030,6 @@
 #undef CASE_BODY
 #undef ONE_PER_SPACE
 #undef ALL_SPACES
-#undef EMIT_COMMON_REFERENCE_PATTERNS
 #undef ASSIGN_DEST_SPACE
 
       case kNewPage: {
@@ -973,6 +1041,11 @@
         break;
       }
 
+      case kSkip: {
+        current++;
+        break;
+      }
+
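The matching producer is in Serializer::VisitPointers below: the slot that
caches the store buffer's top pointer is serialized as kSkip, presumably so
the deserialized heap keeps whatever value the running isolate installed
there. A minimal sketch of the producer side (stand-in types; `put` is a
hypothetical byte sink):

    enum { kSkipByte = 0x0c };  // mirrors kSkip in serialize.h below

    void SerializeSlot(void** slot, void** store_buffer_top_slot,
                       void (*put)(int)) {
      if (slot == store_buffer_top_slot) {
        put(kSkipByte);  // the deserializer will just step over this cell
        return;
      }
      // ... normal pointer serialization would go here ...
    }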
       case kNativesStringResource: {
         int index = source_->Get();
         Vector<const char> source_vector = Natives::GetRawScriptSource(index);
@@ -1043,10 +1116,12 @@
     : sink_(sink),
       current_root_index_(0),
       external_reference_encoder_(new ExternalReferenceEncoder),
-      large_object_total_(0) {
+      large_object_total_(0),
+      root_index_wave_front_(0) {
+  isolate_ = Isolate::Current();
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  ASSERT(Isolate::Current()->IsDefaultIsolate());
+  ASSERT(isolate_->IsDefaultIsolate());
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
   }
@@ -1066,11 +1141,8 @@
   CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
   CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
   // We don't support serializing installed extensions.
-  for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
-       ext != NULL;
-       ext = ext->next()) {
-    CHECK_NE(v8::INSTALLED, ext->state());
-  }
+  CHECK(!isolate->has_installed_extensions());
+
   HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
 }
 
@@ -1097,8 +1169,17 @@
 
 
 void Serializer::VisitPointers(Object** start, Object** end) {
+  Isolate* isolate = Isolate::Current();
+
   for (Object** current = start; current < end; current++) {
-    if ((*current)->IsSmi()) {
+    if (start == isolate->heap()->roots_array_start()) {
+      root_index_wave_front_ =
+          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
+    }
+    if (reinterpret_cast<Address>(current) ==
+        isolate->heap()->store_buffer()->TopAddress()) {
+      sink_->Put(kSkip, "Skip");
+    } else if ((*current)->IsSmi()) {
       sink_->Put(kRawData, "RawData");
       sink_->PutInt(kPointerSize, "length");
       for (int i = 0; i < kPointerSize; i++) {
@@ -1162,10 +1243,12 @@
 }
 
 
-int PartialSerializer::RootIndex(HeapObject* heap_object) {
-  for (int i = 0; i < Heap::kRootListLength; i++) {
-    Object* root = HEAP->roots_address()[i];
-    if (root == heap_object) return i;
+int Serializer::RootIndex(HeapObject* heap_object) {
+  Heap* heap = HEAP;
+  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
+  for (int i = 0; i < root_index_wave_front_; i++) {
+    Object* root = heap->roots_array_start()[i];
+    if (!root->IsSmi() && root == heap_object) return i;
   }
   return kInvalidRootIndex;
 }
@@ -1201,18 +1284,8 @@
   // all objects) then we should shift out the bits that are always 0.
   if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
   if (from_start) {
-#define COMMON_REFS_CASE(pseudo_space, actual_space, offset)                   \
-    if (space == actual_space && address == offset &&                          \
-        how_to_code == kPlain && where_to_point == kStartOfObject) {           \
-      sink_->Put(kFromStart + how_to_code + where_to_point +                   \
-                 pseudo_space, "RefSer");                                      \
-    } else  /* NOLINT */
-    COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
-    {  /* NOLINT */
-      sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
-      sink_->PutInt(address, "address");
-    }
+    sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
+    sink_->PutInt(address, "address");
   } else {
     sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
     sink_->PutInt(address, "address");
@@ -1227,6 +1300,12 @@
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
+  int root_index;
+  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+    PutRoot(root_index, heap_object, how_to_code, where_to_point);
+    return;
+  }
+
   if (address_mapper_.IsMapped(heap_object)) {
     int space = SpaceOfAlreadySerializedObject(heap_object);
     int address = address_mapper_.MappedTo(heap_object);
@@ -1257,6 +1336,28 @@
 }
 
 
+void Serializer::PutRoot(int root_index,
+                         HeapObject* object,
+                         SerializerDeserializer::HowToCode how_to_code,
+                         SerializerDeserializer::WhereToPoint where_to_point) {
+  if (how_to_code == kPlain &&
+      where_to_point == kStartOfObject &&
+      root_index < kRootArrayNumberOfConstantEncodings &&
+      !HEAP->InNewSpace(object)) {
+    if (root_index < kRootArrayNumberOfLowConstantEncodings) {
+      sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
+    } else {
+      sink_->Put(kRootArrayHighConstants + root_index -
+                     kRootArrayNumberOfLowConstantEncodings,
+                 "RootHiConstant");
+    }
+  } else {
+    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+    sink_->PutInt(root_index, "root_index");
+  }
+}
+
+
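PutRoot folds a small root index into one byte: indices 0-15 land in the 0xb0
row and 16-31 in the 0xf0 row, which is exactly what
RootArrayConstantFromByteCode in serialize.h inverts. A standalone sketch of
the encode side, using the constants from serialize.h below:

    #include <cassert>

    static const int kRootArrayLowConstants = 0xb0;
    static const int kRootArrayHighConstants = 0xf0;
    static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
    static const int kRootArrayNumberOfConstantEncodings = 0x20;

    int ByteCodeForRootConstant(int root_index) {
      assert(root_index >= 0 &&
             root_index < kRootArrayNumberOfConstantEncodings);
      if (root_index < kRootArrayNumberOfLowConstantEncodings) {
        return kRootArrayLowConstants + root_index;  // 0xb0..0xbf
      }
      return kRootArrayHighConstants +
             (root_index - kRootArrayNumberOfLowConstantEncodings);
      // 0xf0..0xff
    }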
 void PartialSerializer::SerializeObject(
     Object* o,
     HowToCode how_to_code,
@@ -1266,8 +1367,7 @@
 
   int root_index;
   if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
-    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
-    sink_->PutInt(root_index, "root_index");
+    PutRoot(root_index, heap_object, how_to_code, where_to_point);
     return;
   }
 
@@ -1345,14 +1445,48 @@
     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
 
     while (current < end && !(*current)->IsSmi()) {
-      serializer_->SerializeObject(*current, kPlain, kStartOfObject);
-      bytes_processed_so_far_ += kPointerSize;
-      current++;
+      HeapObject* current_contents = HeapObject::cast(*current);
+      int root_index = serializer_->RootIndex(current_contents);
+      // Repeats are not subject to the write barrier so there are only some
+      // objects that can be used in a repeat encoding.  These are the early
+      // ones in the root array that are never in new space.
+      if (current != start &&
+          root_index != kInvalidRootIndex &&
+          root_index < kRootArrayNumberOfConstantEncodings &&
+          current_contents == current[-1]) {
+        ASSERT(!HEAP->InNewSpace(current_contents));
+        int repeat_count = 1;
+        while (current < end - 1 && current[repeat_count] == current_contents) {
+          repeat_count++;
+        }
+        current += repeat_count;
+        bytes_processed_so_far_ += repeat_count * kPointerSize;
+        if (repeat_count > kMaxRepeats) {
+          sink_->Put(kRepeat, "SerializeRepeats");
+          sink_->PutInt(repeat_count, "SerializeRepeats");
+        } else {
+          sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
+        }
+      } else {
+        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
+        bytes_processed_so_far_ += kPointerSize;
+        current++;
+      }
     }
   }
 }
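
For reference, a bounded standalone version of the run detection above; this
sketch re-checks the scan index against `end` on every step and uses a printf
stand-in for the byte sink:

    #include <cstdio>

    static const int kRepeatByte = 0x73;             // kRepeat in serialize.h
    static const int kMaxRepeatCount = 0x7f - 0x73;  // 12

    // Precondition: current[-1] was just serialized and *current equals it,
    // so at least one repeat is found.
    void** EmitRepeats(void** current, void** end) {
      void* value = current[-1];
      int repeat_count = 0;
      while (current + repeat_count < end && current[repeat_count] == value) {
        repeat_count++;
      }
      if (repeat_count > kMaxRepeatCount) {
        printf("byte 0x%x, int %d\n", kRepeatByte, repeat_count);
      } else {
        printf("byte 0x%x\n", kRepeatByte + repeat_count);  // CodeForRepeats
      }
      return current + repeat_count;
    }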
 
 
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+  Object** current = rinfo->target_object_address();
+
+  OutputRawData(rinfo->target_address_address());
+  HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+  serializer_->SerializeObject(*current, representation, kStartOfObject);
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+
 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
                                                            Address* end) {
   Address references_start = reinterpret_cast<Address>(start);
@@ -1367,6 +1501,20 @@
 }
 
 
+void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
+  Address references_start = rinfo->target_address_address();
+  OutputRawData(references_start);
+
+  Address* current = rinfo->target_reference_address();
+  int representation = rinfo->IsCodedSpecially() ?
+                       kFromCode + kStartOfObject : kPlain + kStartOfObject;
+  sink_->Put(kExternalReference + representation, "ExternalRef");
+  int reference_id = serializer_->EncodeExternalReference(*current);
+  sink_->PutInt(reference_id, "reference id");
+  bytes_processed_so_far_ += rinfo->target_address_size();
+}
+
+
 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
   Address target_start = rinfo->target_address_address();
   OutputRawData(target_start);
@@ -1420,7 +1568,7 @@
     if (!source->IsUndefined()) {
       ExternalAsciiString* string = ExternalAsciiString::cast(source);
       typedef v8::String::ExternalAsciiStringResource Resource;
-      Resource* resource = string->resource();
+      const Resource* resource = string->resource();
       if (resource == *resource_pointer) {
         sink_->Put(kNativesStringResource, "NativesStringResource");
         sink_->PutSection(i, "NativesStringResourceEnd");
@@ -1518,8 +1666,8 @@
     // serialized address.
     CHECK(IsPowerOf2(Page::kPageSize));
     int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
-    CHECK(size <= Page::kObjectAreaSize);
-    if (used_in_this_page + size > Page::kObjectAreaSize) {
+    CHECK(size <= SpaceAreaSize(space));
+    if (used_in_this_page + size > SpaceAreaSize(space)) {
       *new_page = true;
       fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
     }
@@ -1530,4 +1678,13 @@
 }
 
 
+int Serializer::SpaceAreaSize(int space) {
+  if (space == CODE_SPACE) {
+    return isolate_->memory_allocator()->CodePageAreaSize();
+  } else {
+    return Page::kPageSize - Page::kObjectStartOffset;
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index 66d6fb5..bd9c0d8 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -187,24 +187,6 @@
 };
 
 
-// It is very common to have a reference to objects at certain offsets in the
-// heap.  These offsets have been determined experimentally.  We code
-// references to such objects in a single byte that encodes the way the pointer
-// is written (only plain pointers allowed), the space number and the offset.
-// This only works for objects in the first page of a space.  Don't use this for
-// things in newspace since it bypasses the write barrier.
-
-static const int k64 = (sizeof(uintptr_t) - 4) / 4;
-
-#define COMMON_REFERENCE_PATTERNS(f)                               \
-  f(kNumberOfSpaces, 2, (11 - k64))                                \
-  f((kNumberOfSpaces + 1), 2, 0)                                   \
-  f((kNumberOfSpaces + 2), 2, (142 - 16 * k64))                    \
-  f((kNumberOfSpaces + 3), 2, (74 - 15 * k64))                     \
-  f((kNumberOfSpaces + 4), 2, 5)                                   \
-  f((kNumberOfSpaces + 5), 1, 135)                                 \
-  f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
-
 #define COMMON_RAW_LENGTHS(f)        \
   f(1, 1)  \
   f(2, 2)  \
@@ -238,10 +220,11 @@
     kRootArray = 0x9,               // Object is found in root array.
     kPartialSnapshotCache = 0xa,    // Object is in the cache.
     kExternalReference = 0xb,       // Pointer to an external reference.
-    // 0xc-0xf                         Free.
+    kSkip = 0xc,                    // Skip a pointer sized cell.
+    // 0xd-0xf                         Free.
     kBackref = 0x10,                 // Object is described relative to end.
     // 0x11-0x18                       One per space.
-    // 0x19-0x1f                       Common backref offsets.
+    // 0x19-0x1f                       Free.
     kFromStart = 0x20,              // Object is described relative to start.
     // 0x21-0x28                       One per space.
     // 0x29-0x2f                       Free.
@@ -278,9 +261,29 @@
   // is referred to from external strings in the snapshot.
   static const int kNativesStringResource = 0x71;
   static const int kNewPage = 0x72;
-  // 0x73-0x7f                            Free.
-  // 0xb0-0xbf                            Free.
-  // 0xf0-0xff                            Free.
+  static const int kRepeat = 0x73;
+  static const int kConstantRepeat = 0x74;
+  // 0x74-0x7f            Repeat last word (subtract 0x73 to get the count).
+  static const int kMaxRepeats = 0x7f - 0x73;
+  static int CodeForRepeats(int repeats) {
+    ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
+    return 0x73 + repeats;
+  }
+  static int RepeatsForCode(int byte_code) {
+    ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
+    return byte_code - 0x73;
+  }
+  static const int kRootArrayLowConstants = 0xb0;
+  // 0xb0-0xbf            Things from the first 16 elements of the root array.
+  static const int kRootArrayHighConstants = 0xf0;
+  // 0xf0-0xff            Things from the next 16 elements of the root array.
+  static const int kRootArrayNumberOfConstantEncodings = 0x20;
+  static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
+  static int RootArrayConstantFromByteCode(int byte_code) {
+    int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2);
+    ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings);
+    return constant;
+  }
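Two worked decodes of the bit trick above: the low nibble selects the index
within a row, and bit 0x40 (clear in 0xb0-0xbf, set in 0xf0-0xff) shifts down
two places to contribute the +16 for the high row:

    //   0xb5: (0xb5 & 0xf) = 5, (0xb5 & 0x40) >> 2 = 0x00  -> constant  5
    //   0xf5: (0xf5 & 0xf) = 5, (0xf5 & 0x40) >> 2 = 0x10  -> constant 21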
 
 
   static const int kLargeData = LAST_SPACE;
@@ -353,7 +356,13 @@
     UNREACHABLE();
   }
 
-  void ReadChunk(Object** start, Object** end, int space, Address address);
+  // Fills in some heap data in an area from start to end (non-inclusive).  The
+  // space id is used for the write barrier.  The object_address is the address
+  // of the object we are writing into, or NULL if we are not writing into an
+  // object, i.e. if we are writing a series of tagged values that are not on the
+  // heap.
+  void ReadChunk(
+      Object** start, Object** end, int space, Address object_address);
   HeapObject* GetAddressFromStart(int space);
   inline HeapObject* GetAddressFromEnd(int space);
   Address Allocate(int space_number, Space* space, int size);
@@ -474,14 +483,22 @@
   static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
   static bool enabled() { return serialization_enabled_; }
   SerializationAddressMapper* address_mapper() { return &address_mapper_; }
+  void PutRoot(
+      int index, HeapObject* object, HowToCode how, WhereToPoint where);
 #ifdef DEBUG
   virtual void Synchronize(const char* tag);
 #endif
 
  protected:
   static const int kInvalidRootIndex = -1;
-  virtual int RootIndex(HeapObject* heap_object) = 0;
+
+  int RootIndex(HeapObject* heap_object);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+  intptr_t root_index_wave_front() { return root_index_wave_front_; }
+  void set_root_index_wave_front(intptr_t value) {
+    ASSERT(value >= root_index_wave_front_);
+    root_index_wave_front_ = value;
+  }
 
   class ObjectSerializer : public ObjectVisitor {
    public:
@@ -497,7 +514,9 @@
         bytes_processed_so_far_(0) { }
     void Serialize();
     void VisitPointers(Object** start, Object** end);
+    void VisitEmbeddedPointer(RelocInfo* target);
     void VisitExternalReferences(Address* start, Address* end);
+    void VisitExternalReference(RelocInfo* rinfo);
     void VisitCodeTarget(RelocInfo* target);
     void VisitCodeEntry(Address entry_address);
     void VisitGlobalPropertyCell(RelocInfo* rinfo);
@@ -544,6 +563,9 @@
     return external_reference_encoder_->Encode(addr);
   }
 
+  int SpaceAreaSize(int space);
+
+  Isolate* isolate_;
   // Keep track of the fullness of each space in order to generate
   // relative addresses for back references.  Large objects are
   // just numbered sequentially since relative addresses make no
@@ -557,6 +579,7 @@
   static bool too_late_to_enable_now_;
   int large_object_total_;
   SerializationAddressMapper address_mapper_;
+  intptr_t root_index_wave_front_;
 
   friend class ObjectSerializer;
   friend class Deserializer;
@@ -571,6 +594,7 @@
                     SnapshotByteSink* sink)
     : Serializer(sink),
       startup_serializer_(startup_snapshot_serializer) {
+    set_root_index_wave_front(Heap::kStrongRootListLength);
   }
 
   // Serialize the objects reachable from a single object pointer.
@@ -580,7 +604,6 @@
                                WhereToPoint where_to_point);
 
  protected:
-  virtual int RootIndex(HeapObject* o);
   virtual int PartialSnapshotCacheIndex(HeapObject* o);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
     // Scripts should be referred only through shared function infos.  We can't
@@ -590,7 +613,7 @@
     ASSERT(!o->IsScript());
     return o->IsString() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
-           o->IsSerializedScopeInfo() ||
+           o->IsScopeInfo() ||
            o->map() == HEAP->fixed_cow_array_map();
   }
 
@@ -605,7 +628,7 @@
   explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
     // Clear the cache of objects used by the partial snapshot.  After the
     // strong roots have been serialized we can create a partial snapshot
-    // which will repopulate the cache with objects neede by that partial
+    // which will repopulate the cache with objects needed by that partial
     // snapshot.
     Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
   }
@@ -624,7 +647,6 @@
   }
 
  private:
-  virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
     return false;
   }
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 35d7224..c662980 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,355 +37,211 @@
 
 
 // -----------------------------------------------------------------------------
+// Bitmap
+
+void Bitmap::Clear(MemoryChunk* chunk) {
+  Bitmap* bitmap = chunk->markbits();
+  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
+  chunk->ResetLiveBytes();
+}
+
+
+// -----------------------------------------------------------------------------
 // PageIterator
 
+
+PageIterator::PageIterator(PagedSpace* space)
+    : space_(space),
+      prev_page_(&space->anchor_),
+      next_page_(prev_page_->next_page()) { }
+
+
 bool PageIterator::has_next() {
-  return prev_page_ != stop_page_;
+  return next_page_ != &space_->anchor_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  prev_page_ = (prev_page_ == NULL)
-               ? space_->first_page_
-               : prev_page_->next_page();
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
   return prev_page_;
 }
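
The simplified iterator works because pages of a space now hang off a circular
doubly linked list whose head, the anchor, is a sentinel rather than a real
page; traversal stops when it wraps back to the anchor. A minimal stand-in
sketch of the pattern:

    struct PageNode {
      PageNode* next;
      PageNode* prev;
    };

    struct SpacePageList {
      PageNode anchor;  // sentinel; never holds objects
      SpacePageList() { anchor.next = anchor.prev = &anchor; }

      void Append(PageNode* p) {  // link a new page in before the anchor
        p->prev = anchor.prev;
        p->next = &anchor;
        anchor.prev->next = p;
        anchor.prev = p;
      }

      template <typename F>
      void ForEach(F f) {
        for (PageNode* p = anchor.next; p != &anchor; p = p->next) f(p);
      }
    };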
 
 
 // -----------------------------------------------------------------------------
-// Page
+// NewSpacePageIterator
 
-Page* Page::next_page() {
-  return heap_->isolate()->memory_allocator()->GetNextPage(this);
+
+NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
+    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
+      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+    : prev_page_(space->anchor()),
+      next_page_(prev_page_->next_page()),
+      last_page_(prev_page_->prev_page()) { }
+
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+      next_page_(NewSpacePage::FromAddress(start)),
+      last_page_(NewSpacePage::FromLimit(limit)) {
+  SemiSpace::AssertValidRange(start, limit);
 }
 
 
-Address Page::AllocationTop() {
-  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
-  return owner->PageAllocationTop(this);
+bool NewSpacePageIterator::has_next() {
+  return prev_page_ != last_page_;
 }
 
 
-Address Page::AllocationWatermark() {
-  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
-  if (this == owner->AllocationTopPage()) {
-    return owner->top();
-  }
-  return address() + AllocationWatermarkOffset();
+NewSpacePage* NewSpacePageIterator::next() {
+  ASSERT(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
 }
 
 
-uint32_t Page::AllocationWatermarkOffset() {
-  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
-                               kAllocationWatermarkOffsetShift);
-}
-
-
-void Page::SetAllocationWatermark(Address allocation_watermark) {
-  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
-    // When iterating intergenerational references during scavenge
-    // we might decide to promote an encountered young object.
-    // We will allocate a space for such an object and put it
-    // into the promotion queue to process it later.
-    // If space for object was allocated somewhere beyond allocation
-    // watermark this might cause garbage pointers to appear under allocation
-    // watermark. To avoid visiting them during dirty regions iteration
-    // which might be still in progress we store a valid allocation watermark
-    // value and mark this page as having an invalid watermark.
-    SetCachedAllocationWatermark(AllocationWatermark());
-    InvalidateWatermark(true);
-  }
-
-  flags_ = (flags_ & kFlagsMask) |
-           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
-  ASSERT(AllocationWatermarkOffset()
-         == static_cast<uint32_t>(Offset(allocation_watermark)));
-}
-
-
-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
-  mc_first_forwarded = allocation_watermark;
-}
-
-
-Address Page::CachedAllocationWatermark() {
-  return mc_first_forwarded;
-}
-
-
-uint32_t Page::GetRegionMarks() {
-  return dirty_regions_;
-}
-
-
-void Page::SetRegionMarks(uint32_t marks) {
-  dirty_regions_ = marks;
-}
-
-
-int Page::GetRegionNumberForAddress(Address addr) {
-  // Each page is divided into 256 byte regions. Each region has a corresponding
-  // dirty mark bit in the page header. Region can contain intergenerational
-  // references iff its dirty mark is set.
-  // A normal 8K page contains exactly 32 regions so all region marks fit
-  // into 32-bit integer field. To calculate a region number we just divide
-  // offset inside page by region size.
-  // A large page can contain more then 32 regions. But we want to avoid
-  // additional write barrier code for distinguishing between large and normal
-  // pages so we just ignore the fact that addr points into a large page and
-  // calculate region number as if addr pointed into a normal 8K page. This way
-  // we get a region number modulo 32 so for large pages several regions might
-  // be mapped to a single dirty mark.
-  ASSERT_PAGE_ALIGNED(this->address());
-  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
-  // We are using masking with kPageAlignmentMask instead of Page::Offset()
-  // to get an offset to the beginning of 8K page containing addr not to the
-  // beginning of actual page which can be bigger then 8K.
-  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
-  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
-}
-
-
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
-  return 1 << GetRegionNumberForAddress(addr);
-}
-
-
-uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
-  uint32_t result = 0;
-  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
-  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
-    result = kAllRegionsDirtyMarks;
-  } else if (length_in_bytes > 0) {
-    int start_region = GetRegionNumberForAddress(start);
-    int end_region =
-        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
-    uint32_t start_mask = (~0) << start_region;
-    uint32_t end_mask = ~((~1) << end_region);
-    result = start_mask & end_mask;
-    // if end_region < start_region, the mask is ored.
-    if (result == 0) result = start_mask | end_mask;
-  }
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    uint32_t expected = 0;
-    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
-      expected |= GetRegionMaskForAddress(a);
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+  while (cur_addr_ != cur_end_) {
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
+      continue;
     }
-    ASSERT(expected == result);
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    cur_addr_ += obj_size;
+    ASSERT(cur_addr_ <= cur_end_);
+    if (!obj->IsFiller()) {
+      ASSERT_OBJECT_SIZE(obj_size);
+      return obj;
+    }
   }
-#endif
-  return result;
-}
-
-
-void Page::MarkRegionDirty(Address address) {
-  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
-
-
-bool Page::IsRegionDirty(Address address) {
-  return GetRegionMarks() & GetRegionMaskForAddress(address);
-}
-
-
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
-  int rstart = GetRegionNumberForAddress(start);
-  int rend = GetRegionNumberForAddress(end);
-
-  if (reaches_limit) {
-    end += 1;
-  }
-
-  if ((rend - rstart) == 0) {
-    return;
-  }
-
-  uint32_t bitmask = 0;
-
-  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
-      || (start == ObjectAreaStart())) {
-    // First region is fully covered
-    bitmask = 1 << rstart;
-  }
-
-  while (++rstart < rend) {
-    bitmask |= 1 << rstart;
-  }
-
-  if (bitmask) {
-    SetRegionMarks(GetRegionMarks() & ~bitmask);
-  }
-}
-
-
-void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
-  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
-}
-
-
-bool Page::IsWatermarkValid() {
-  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
-      heap_->page_watermark_invalidated_mark_;
-}
-
-
-void Page::InvalidateWatermark(bool value) {
-  if (value) {
-    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-             heap_->page_watermark_invalidated_mark_;
-  } else {
-    flags_ =
-        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-        (heap_->page_watermark_invalidated_mark_ ^
-         (1 << WATERMARK_INVALIDATED));
-  }
-
-  ASSERT(IsWatermarkValid() == !value);
-}
-
-
-bool Page::GetPageFlag(PageFlag flag) {
-  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
-}
-
-
-void Page::SetPageFlag(PageFlag flag, bool value) {
-  if (value) {
-    flags_ |= static_cast<intptr_t>(1 << flag);
-  } else {
-    flags_ &= ~static_cast<intptr_t>(1 << flag);
-  }
-}
-
-
-void Page::ClearPageFlags() {
-  flags_ = 0;
-}
-
-
-void Page::ClearGCFields() {
-  InvalidateWatermark(true);
-  SetAllocationWatermark(ObjectAreaStart());
-  if (heap_->gc_state() == Heap::SCAVENGE) {
-    SetCachedAllocationWatermark(ObjectAreaStart());
-  }
-  SetRegionMarks(kAllRegionsCleanMarks);
-}
-
-
-bool Page::WasInUseBeforeMC() {
-  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
-}
-
-
-void Page::SetWasInUseBeforeMC(bool was_in_use) {
-  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
-}
-
-
-bool Page::IsLargeObjectPage() {
-  return !GetPageFlag(IS_NORMAL_PAGE);
-}
-
-
-void Page::SetIsLargeObjectPage(bool is_large_object_page) {
-  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
-}
-
-Executability Page::PageExecutability() {
-  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-}
-
-
-void Page::SetPageExecutability(Executability executable) {
-  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
+  return NULL;
 }
 
 
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 
-void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
-  address_ = a;
-  size_ = s;
-  owner_ = o;
-  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
-  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  OS::Protect(start, size);
 }
 
 
-bool MemoryAllocator::IsValidChunk(int chunk_id) {
-  if (!IsValidChunkId(chunk_id)) return false;
-
-  ChunkInfo& c = chunks_[chunk_id];
-  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
+void MemoryAllocator::Unprotect(Address start,
+                                size_t size,
+                                Executability executable) {
+  OS::Unprotect(start, size, executable);
 }
 
 
-bool MemoryAllocator::IsValidChunkId(int chunk_id) {
-  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Protect(chunks_[id].address(), chunks_[id].size());
 }
 
 
-bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
-  ASSERT(p->is_valid());
-
-  int chunk_id = GetChunkId(p);
-  if (!IsValidChunkId(chunk_id)) return false;
-
-  ChunkInfo& c = chunks_[chunk_id];
-  return (c.address() <= p->address()) &&
-         (p->address() < c.address() + c.size()) &&
-         (space == c.owner());
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                chunks_[id].owner()->executable() == EXECUTABLE);
 }
 
-
-Page* MemoryAllocator::GetNextPage(Page* p) {
-  ASSERT(p->is_valid());
-  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
-  return Page::FromAddress(AddressFrom<Address>(raw_addr));
-}
-
-
-int MemoryAllocator::GetChunkId(Page* p) {
-  ASSERT(p->is_valid());
-  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
-}
-
-
-void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
-  ASSERT(prev->is_valid());
-  int chunk_id = GetChunkId(prev);
-  ASSERT_PAGE_ALIGNED(next->address());
-  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
-}
-
-
-PagedSpace* MemoryAllocator::PageOwner(Page* page) {
-  int chunk_id = GetChunkId(page);
-  ASSERT(IsValidChunk(chunk_id));
-  return chunks_[chunk_id].owner();
-}
-
-
-bool MemoryAllocator::InInitialChunk(Address address) {
-  if (initial_chunk_ == NULL) return false;
-
-  Address start = static_cast<Address>(initial_chunk_->address());
-  return (start <= address) && (address < start + initial_chunk_->size());
-}
+#endif
 
 
 // --------------------------------------------------------------------------
 // PagedSpace
+Page* Page::Initialize(Heap* heap,
+                       MemoryChunk* chunk,
+                       Executability executable,
+                       PagedSpace* owner) {
+  Page* page = reinterpret_cast<Page*>(chunk);
+  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->owner() == owner);
+  owner->IncreaseCapacity(page->area_size());
+  owner->Free(page->area_start(), page->area_size());
+
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+  return page;
+}
+
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
   if (!p->is_valid()) return false;
-  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
+  return p->owner() == this;
+}
+
+
+void MemoryChunk::set_scan_on_scavenge(bool scan) {
+  if (scan) {
+    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+    SetFlag(SCAN_ON_SCAVENGE);
+  } else {
+    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  heap_->incremental_marking()->SetOldSpacePageFlags(this);
+}
+
+
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
+      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
+  if (maybe->owner() != NULL) return maybe;
+  LargeObjectIterator iterator(HEAP->lo_space());
+  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
+    // Fixed arrays are the only pointer-containing objects in large object
+    // space.
+    if (o->IsFixedArray()) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
+      if (chunk->Contains(addr)) {
+        return chunk;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
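
The fast path above relies on chunk alignment: clearing the low bits of any
interior pointer lands on the chunk header. A tiny sketch with an illustrative
1 MB alignment:

    #include <cstdint>

    const uintptr_t kChunkAlignment = uintptr_t(1) << 20;  // illustrative

    uintptr_t ChunkHeaderFromPointer(uintptr_t addr) {
      return addr & ~(kChunkAlignment - 1);  // round down to the chunk base
    }

Large object space is the exception handled by the loop above: a large object
spans several aligned units, so masking an interior pointer can land inside
the object rather than on a header, and the owner check catches that.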
+
+
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
+    : state_(kOldPointerState),
+      old_pointer_iterator_(heap->old_pointer_space()),
+      map_iterator_(heap->map_space()),
+      lo_iterator_(heap->lo_space()) { }
+
+
+Page* Page::next_page() {
+  ASSERT(next_chunk()->owner() == owner());
+  return static_cast<Page*>(next_chunk());
+}
+
+
+Page* Page::prev_page() {
+  ASSERT(prev_chunk()->owner() == owner());
+  return static_cast<Page*>(prev_chunk());
+}
+
+
+void Page::set_next_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_next_chunk(page);
+}
+
+
+void Page::set_prev_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_prev_chunk(page);
 }
 
 
@@ -393,42 +249,41 @@
 // not contain slow case logic (eg, move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
-                                         int size_in_bytes) {
-  Address current_top = alloc_info->top;
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
-  if (new_top > alloc_info->limit) return NULL;
+  if (new_top > allocation_info_.limit) return NULL;
 
-  alloc_info->top = new_top;
-  ASSERT(alloc_info->VerifyPagedAllocation());
-  accounting_stats_.AllocateBytes(size_in_bytes);
+  allocation_info_.top = new_top;
   return HeapObject::FromAddress(current_top);
 }
 
 
 // Raw allocation.
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
-  if (object != NULL) return object;
+  HeapObject* object = AllocateLinearly(size_in_bytes);
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
+
+  object = free_list_.Allocate(size_in_bytes);
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
 
   object = SlowAllocateRaw(size_in_bytes);
-  if (object != NULL) return object;
-
-  return Failure::RetryAfterGC(identity());
-}
-
-
-// Reallocating (and promoting) objects during a compacting collection.
-MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-  if (object != NULL) return object;
-
-  object = SlowMCAllocateRaw(size_in_bytes);
-  if (object != NULL) return object;
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
 
   return Failure::RetryAfterGC(identity());
 }
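
The rewritten AllocateRaw tries three tiers in order: bump the linear
allocation pointer, fall back to the free list, then take the slow path. A
stand-in sketch of that shape (FreeListAllocate and SlowAllocateRaw are
assumed helpers, not shown):

    #include <cstddef>

    struct MiniSpace {
      char* top;    // linear allocation pointer
      char* limit;  // end of the linear allocation area

      void* AllocateLinearly(size_t n) {  // tier 1: bump pointer
        if (top + n > limit) return NULL;
        void* result = top;
        top += n;
        return result;
      }
      void* FreeListAllocate(size_t n);  // tier 2 (assumed helper)
      void* SlowAllocateRaw(size_t n);   // tier 3: new page / GC (assumed)

      void* AllocateRaw(size_t n) {
        if (void* o = AllocateLinearly(n)) return o;
        if (void* o = FreeListAllocate(n)) return o;
        return SlowAllocateRaw(n);  // NULL here maps to RetryAfterGC above
      }
    };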
@@ -437,27 +292,29 @@
 // -----------------------------------------------------------------------------
 // NewSpace
 
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
-                                           AllocationInfo* alloc_info) {
-  Address new_top = alloc_info->top + size_in_bytes;
-  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
 
-  Object* obj = HeapObject::FromAddress(alloc_info->top);
-  alloc_info->top = new_top;
-#ifdef DEBUG
-  SemiSpace* space =
-      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
-  ASSERT(space->low() <= alloc_info->top
-         && alloc_info->top <= space->high()
-         && alloc_info->limit == space->high());
-#endif
+MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top;
+  if (allocation_info_.limit - old_top < size_in_bytes) {
+    return SlowAllocateRaw(size_in_bytes);
+  }
+
+  Object* obj = HeapObject::FromAddress(allocation_info_.top);
+  allocation_info_.top += size_in_bytes;
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
   return obj;
 }
 
 
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+  return static_cast<LargePage*>(chunk);
+}
+
+
 intptr_t LargeObjectSpace::Available() {
-  return LargeObjectChunk::ObjectSizeFor(
-      heap()->isolate()->memory_allocator()->Available());
+  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }
 
 
@@ -467,16 +324,23 @@
   ASSERT(string->IsSeqString());
   ASSERT(string->address() + StringType::SizeFor(string->length()) ==
          allocation_info_.top);
+  Address old_top = allocation_info_.top;
   allocation_info_.top =
       string->address() + StringType::SizeFor(length);
   string->set_length(length);
+  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
+    int delta = static_cast<int>(old_top - allocation_info_.top);
+    MemoryChunk::IncrementLiveBytes(string->address(), -delta);
+  }
 }
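
The new branch keeps the incremental marker's per-page accounting honest: a
string that was already marked black has its bytes counted as live, so the
bytes returned at the allocation top must come back out of the page's
live-byte count. A sketch with hypothetical stand-ins for the marking query
and the counter:

    struct PageStats { int live_bytes; };

    // was_black stands in for Marking::IsBlack(...); old_top/new_top for the
    // allocation boundary before and after the shrink.
    void AccountForShrink(PageStats* page, bool was_black,
                          char* old_top, char* new_top) {
      if (was_black) {
        int delta = static_cast<int>(old_top - new_top);  // bytes released
        page->live_bytes -= delta;
      }
    }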
 
 
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  return object->map() == HEAP->raw_unchecked_byte_array_map()
-      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
-      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
+  Map* map = object->map();
+  Heap* heap = object->GetHeap();
+  return map == heap->raw_unchecked_free_space_map()
+      || map == heap->raw_unchecked_one_pointer_filler_map()
+      || map == heap->raw_unchecked_two_pointer_filler_map();
 }
 
 } }  // namespace v8::internal
diff --git a/src/spaces.cc b/src/spaces.cc
index 97c6d2a..1ee3359 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -35,112 +35,87 @@
 namespace v8 {
 namespace internal {
 
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space).low() <= (info).top                  \
-         && (info).top <= (space).high()              \
-         && (info).limit == (space).high())
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  Initialize(space->bottom(), space->top(), NULL);
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize as if we have
+  // reached the end of the anchor page, so that the first iteration will move
+  // on to the first page.
+  Initialize(space,
+             NULL,
+             NULL,
+             kAllPagesInSpace,
+             NULL);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                        HeapObjectCallback size_func) {
-  Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
-                                       HeapObjectCallback size_func) {
-  Initialize(start, space->top(), size_func);
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize the current
+  // address and end as NULL, so that the first iteration will move on to the
+  // first page.
+  Initialize(space,
+             NULL,
+             NULL,
+             kAllPagesInSpace,
+             size_func);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(Page* page,
                                        HeapObjectCallback size_func) {
-  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+  Space* owner = page->owner();
+  ASSERT(owner == HEAP->old_pointer_space() ||
+         owner == HEAP->old_data_space() ||
+         owner == HEAP->map_space() ||
+         owner == HEAP->cell_space() ||
+         owner == HEAP->code_space());
+  Initialize(reinterpret_cast<PagedSpace*>(owner),
+             page->area_start(),
+             page->area_end(),
+             kOnePageOnly,
+             size_func);
+  ASSERT(page->WasSweptPrecisely());
 }
 
 
-void HeapObjectIterator::Initialize(Address cur, Address end,
+void HeapObjectIterator::Initialize(PagedSpace* space,
+                                    Address cur, Address end,
+                                    HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
+  // Check that we actually can iterate this space.
+  ASSERT(!space->was_swept_conservatively());
+
+  space_ = space;
   cur_addr_ = cur;
-  end_addr_ = end;
-  end_page_ = Page::FromAllocationTop(end);
+  cur_end_ = end;
+  page_mode_ = mode;
   size_func_ = size_f;
-  Page* p = Page::FromAllocationTop(cur_addr_);
-  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
-
-#ifdef DEBUG
-  Verify();
-#endif
 }
 
 
-HeapObject* HeapObjectIterator::FromNextPage() {
-  if (cur_addr_ == end_addr_) return NULL;
-
-  Page* cur_page = Page::FromAllocationTop(cur_addr_);
-  cur_page = cur_page->next_page();
-  ASSERT(cur_page->is_valid());
-
-  cur_addr_ = cur_page->ObjectAreaStart();
-  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
-
-  if (cur_addr_ == end_addr_) return NULL;
-  ASSERT(cur_addr_ < cur_limit_);
-#ifdef DEBUG
-  Verify();
-#endif
-  return FromCurrentPage();
-}
-
-
-#ifdef DEBUG
-void HeapObjectIterator::Verify() {
-  Page* p = Page::FromAllocationTop(cur_addr_);
-  ASSERT(p == Page::FromAllocationTop(cur_limit_));
-  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
-  prev_page_ = NULL;
-  switch (mode) {
-    case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage();
-      break;
-    case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage();
-      break;
-    case ALL_PAGES:
-#ifdef DEBUG
-      // Verify that the cached last page in the space is actually the
-      // last page.
-      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
-        if (!p->next_page()->is_valid()) {
-          ASSERT(space->last_page_ == p);
-        }
-      }
-#endif
-      stop_page_ = space->last_page_;
-      break;
+// We have hit the end of the page and should advance to the next block of
+// objects, which starts on the next page.
+bool HeapObjectIterator::AdvanceToNextPage() {
+  ASSERT(cur_addr_ == cur_end_);
+  if (page_mode_ == kOnePageOnly) return false;
+  Page* cur_page;
+  if (cur_addr_ == NULL) {
+    cur_page = space_->anchor();
+  } else {
+    cur_page = Page::FromAddress(cur_addr_ - 1);
+    ASSERT(cur_addr_ == cur_page->area_end());
   }
+  cur_page = cur_page->next_page();
+  if (cur_page == space_->anchor()) return false;
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
+  ASSERT(cur_page->WasSweptPrecisely());
+  return true;
 }
 
 
@@ -171,7 +146,12 @@
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+  Address base = reinterpret_cast<Address>(code_range_->address());
+  Address aligned_base =
+      RoundUp(reinterpret_cast<Address>(code_range_->address()),
+              MemoryChunk::kAlignment);
+  size_t size = code_range_->size() - (aligned_base - base);
+  allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
   return true;
 }
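
SetUp now discards the slack between the raw OS reservation and the first
MemoryChunk boundary, so every block handed out later starts aligned. The
arithmetic is the usual power-of-two round-up; a minimal sketch:

    #include <cstdint>

    // Round x up to a power-of-two alignment.
    uintptr_t RoundUpTo(uintptr_t x, uintptr_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    // usable_size = reserved_size - (RoundUpTo(base, alignment) - base)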
@@ -228,7 +208,8 @@
 
 
 
-void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+Address CodeRange::AllocateRawMemory(const size_t requested,
+                                     size_t* allocated) {
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
@@ -236,14 +217,19 @@
     GetNextAllocationBlock(requested);
   }
   // Commit the requested memory at the start of the current allocation block.
-  *allocated = RoundUp(requested, Page::kPageSize);
+  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (*allocated >= current.size - Page::kPageSize) {
+  if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
+  } else {
+    *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
-  if (!code_range_->Commit(current.start, *allocated, true)) {
+  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -256,7 +242,8 @@
 }
 
 
-void CodeRange::FreeRawMemory(void* address, size_t length) {
+void CodeRange::FreeRawMemory(Address address, size_t length) {
+  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
@@ -274,35 +261,12 @@
 // MemoryAllocator
 //
 
-// 270 is an estimate based on the static default heap size of a pair of 256K
-// semispaces and a 64M old generation.
-const int kEstimatedNumberOfChunks = 270;
-
-
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0),
-      initial_chunk_(NULL),
-      chunks_(kEstimatedNumberOfChunks),
-      free_chunk_ids_(kEstimatedNumberOfChunks),
-      max_nof_chunks_(0),
-      top_(0) {
-}
-
-
-void MemoryAllocator::Push(int free_chunk_id) {
-  ASSERT(max_nof_chunks_ > 0);
-  ASSERT(top_ < max_nof_chunks_);
-  free_chunk_ids_[top_++] = free_chunk_id;
-}
-
-
-int MemoryAllocator::Pop() {
-  ASSERT(top_ > 0);
-  return free_chunk_ids_[--top_];
+      size_executable_(0) {
 }
 
 
@@ -311,112 +275,362 @@
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
 
-  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
-  // space is always in the unit of a chunk (kChunkSize) except the last
-  // expansion.
-  //
-  // Due to alignment, allocated space might be one page less than required
-  // number (kPagesPerChunk) of pages for old spaces.
-  //
-  // Reserve two chunk ids for semispaces, one for map space, one for old
-  // space, and one for code space.
-  max_nof_chunks_ =
-      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
-  if (max_nof_chunks_ > kMaxNofChunks) return false;
-
   size_ = 0;
   size_executable_ = 0;
-  ChunkInfo info;  // uninitialized element.
-  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
-    chunks_.Add(info);
-    free_chunk_ids_.Add(i);
-  }
-  top_ = max_nof_chunks_;
+
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  for (int i = 0; i < max_nof_chunks_; i++) {
-    if (chunks_[i].address() != NULL) DeleteChunk(i);
-  }
-  chunks_.Clear();
-  free_chunk_ids_.Clear();
-
-  if (initial_chunk_ != NULL) {
-    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
-    delete initial_chunk_;
-    initial_chunk_ = NULL;
-  }
-
-  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
-  top_ = 0;
+  // Check that spaces were torn down before MemoryAllocator.
+  ASSERT(size_ == 0);
+  // TODO(gc) this will be true again when we fix FreeMemory.
+  // ASSERT(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
-  size_ = 0;
-  max_nof_chunks_ = 0;
 }
 
 
-void* MemoryAllocator::AllocateRawMemory(const size_t requested,
-                                         size_t* allocated,
-                                         Executability executable) {
-  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
-    return NULL;
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(reservation->IsReserved());
+  size_t size = reservation->size();
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  ASSERT(!isolate_->code_range()->contains(
+      static_cast<Address>(reservation->address())));
+  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+  reservation->Release();
+}
+
+
+void MemoryAllocator::FreeMemory(Address base,
+                                 size_t size,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+    ASSERT(executable == EXECUTABLE);
+    isolate_->code_range()->FreeRawMemory(base, size);
+  } else {
+    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+    bool result = VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    ASSERT(result);
+  }
+}
+
+
+Address MemoryAllocator::ReserveAlignedMemory(size_t size,
+                                              size_t alignment,
+                                              VirtualMemory* controller) {
+  VirtualMemory reservation(size, alignment);
+
+  if (!reservation.IsReserved()) return NULL;
+  size_ += reservation.size();
+  Address base = RoundUp(static_cast<Address>(reservation.address()),
+                         alignment);
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+                                               size_t alignment,
+                                               Executability executable,
+                                               VirtualMemory* controller) {
+  VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  if (base == NULL) return NULL;
+
+  if (executable == EXECUTABLE) {
+    CommitCodePage(&reservation, base, size);
+  } else {
+    if (!reservation.Commit(base,
+                            size,
+                            executable == EXECUTABLE)) {
+      return NULL;
+    }
   }
 
-  void* mem;
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+void Page::InitializeAsAnchor(PagedSpace* owner) {
+  set_owner(owner);
+  set_prev_page(this);
+  set_next_page(this);
+}
+
+
+NewSpacePage* NewSpacePage::Initialize(Heap* heap,
+                                       Address start,
+                                       SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
+  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
+                                               start,
+                                               Page::kPageSize,
+                                               area_start,
+                                               area_end,
+                                               NOT_EXECUTABLE,
+                                               semi_space);
+  chunk->set_next_chunk(NULL);
+  chunk->set_prev_chunk(NULL);
+  chunk->initialize_scan_on_scavenge(true);
+  bool in_to_space = (semi_space->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
+
+
+void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
+  set_owner(semi_space);
+  set_next_chunk(this);
+  set_prev_chunk(this);
+  // Clearing the flags marks this invalid page as not being in new-space.
+  // All real new-space pages will be in new-space.
+  SetFlags(0, ~0);
+}
+
+
+MemoryChunk* MemoryChunk::Initialize(Heap* heap,
+                                     Address base,
+                                     size_t size,
+                                     Address area_start,
+                                     Address area_end,
+                                     Executability executable,
+                                     Space* owner) {
+  MemoryChunk* chunk = FromAddress(base);
+
+  ASSERT(base == chunk->address());
+
+  chunk->heap_ = heap;
+  chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
+  chunk->flags_ = 0;
+  chunk->set_owner(owner);
+  chunk->InitializeReservedMemory();
+  chunk->slots_buffer_ = NULL;
+  chunk->skip_list_ = NULL;
+  chunk->ResetLiveBytes();
+  Bitmap::Clear(chunk);
+  chunk->initialize_scan_on_scavenge(false);
+  chunk->SetFlag(WAS_SWEPT_PRECISELY);
+
+  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
+
   if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }
+
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }
+
+  return chunk;
+}
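
Initialize writes the MemoryChunk header at the chunk's aligned base, which is what lets FromAddress(base) above recover the header with a plain mask. A sketch of that trick; the alignment constant is illustrative, not necessarily V8's actual value:

    #include <assert.h>
    #include <stdint.h>

    static const uintptr_t kChunkAlignment = 1 << 20;  // assumed 1 MB chunks

    // Chunks start on kChunkAlignment boundaries, so masking the low bits
    // of any interior address yields the chunk base (and thus its header).
    uintptr_t ChunkBaseFromAddress(uintptr_t addr) {
      return addr & ~(kChunkAlignment - 1);
    }

    int main() {
      uintptr_t base = 5 * kChunkAlignment;
      assert(ChunkBaseFromAddress(base + 12345) == base);
      return 0;
    }
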
+
+
+void MemoryChunk::InsertAfter(MemoryChunk* other) {
+  next_chunk_ = other->next_chunk_;
+  prev_chunk_ = other;
+  other->next_chunk_->prev_chunk_ = this;
+  other->next_chunk_ = this;
+}
+
+
+void MemoryChunk::Unlink() {
+  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
+    heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  next_chunk_->prev_chunk_ = prev_chunk_;
+  prev_chunk_->next_chunk_ = next_chunk_;
+  prev_chunk_ = NULL;
+  next_chunk_ = NULL;
+}
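
InsertAfter and Unlink, together with the InitializeAsAnchor helpers above, are the classic sentinel-based circular doubly-linked list: an empty list is an anchor whose links point at itself, so no operation needs a NULL check. A self-contained sketch of the same idiom (Node and the function names are illustrative):

    #include <assert.h>
    #include <stddef.h>

    struct Node {
      Node* next;
      Node* prev;
    };

    // An empty list is a sentinel linked to itself.
    void InitAsAnchor(Node* anchor) { anchor->next = anchor->prev = anchor; }

    // Splice node in directly after other.
    void InsertAfter(Node* node, Node* other) {
      node->next = other->next;
      node->prev = other;
      other->next->prev = node;
      other->next = node;
    }

    // Remove node and detach its links, as MemoryChunk::Unlink does.
    void Unlink(Node* node) {
      node->next->prev = node->prev;
      node->prev->next = node->next;
      node->next = node->prev = NULL;
    }

    int main() {
      Node anchor, a, b;
      InitAsAnchor(&anchor);
      InsertAfter(&a, &anchor);  // anchor <-> a
      InsertAfter(&b, &a);       // anchor <-> a <-> b
      Unlink(&a);                // anchor <-> b
      assert(anchor.next == &b && b.prev == &anchor);
      return 0;
    }
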
+
+
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            Executability executable,
+                                            Space* owner) {
+  size_t chunk_size;
+  Heap* heap = isolate_->heap();
+  Address base = NULL;
+  VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
+  if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                         OS::CommitPageSize()) + CodePageGuardSize();
+
     // Check executable memory limit.
-    if (size_executable_ + requested >
-        static_cast<size_t>(capacity_executable_)) {
+    if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
+
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      if (base == NULL) return NULL;
+      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
+                       MemoryChunk::kAlignment));
+      size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
     } else {
-      mem = OS::Allocate(requested, allocated, true);
+      base = AllocateAlignedMemory(chunk_size,
+                                   MemoryChunk::kAlignment,
+                                   executable,
+                                   &reservation);
+      if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
     }
-    // Update executable memory size.
-    size_executable_ += static_cast<int>(*allocated);
-  } else {
-    mem = OS::Allocate(requested, allocated, false);
-  }
-  int alloced = static_cast<int>(*allocated);
-  size_ += alloced;
 
 #ifdef DEBUG
-  ZapBlock(reinterpret_cast<Address>(mem), alloced);
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
 #endif
-  isolate_->counters()->memory_allocated()->Increment(alloced);
-  return mem;
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
+  } else {
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    base = AllocateAlignedMemory(chunk_size,
+                                 MemoryChunk::kAlignment,
+                                 executable,
+                                 &reservation);
+
+    if (base == NULL) return NULL;
+
+#ifdef DEBUG
+    ZapBlock(base, chunk_size);
+#endif
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
+  }
+
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+  }
+
+  MemoryChunk* result = MemoryChunk::Initialize(heap,
+                                                base,
+                                                chunk_size,
+                                                area_start,
+                                                area_end,
+                                                executable,
+                                                owner);
+  result->set_reserved_memory(&reservation);
+  return result;
 }
 
 
-void MemoryAllocator::FreeRawMemory(void* mem,
-                                    size_t length,
+Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-#ifdef DEBUG
-  // Do not try to zap the guard page.
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
-#endif
-  if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
-    isolate_->code_range()->FreeRawMemory(mem, length);
-  } else {
-    OS::Free(mem, length);
-  }
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
-  size_ -= static_cast<int>(length);
-  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
+                                     executable,
+                                     owner);
 
-  ASSERT(size_ >= 0);
-  ASSERT(size_executable_ >= 0);
+  if (chunk == NULL) return NULL;
+
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+                                              Executability executable,
+                                              Space* owner) {
+  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  if (chunk == NULL) return NULL;
+  return LargePage::Initialize(isolate_->heap(), chunk);
+}
+
+
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+  if (chunk->owner() != NULL) {
+    ObjectSpace space =
+        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
+  }
+
+  delete chunk->slots_buffer();
+  delete chunk->skip_list();
+
+  VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    FreeMemory(reservation, chunk->executable());
+  } else {
+    FreeMemory(chunk->address(),
+               chunk->size(),
+               chunk->executable());
+  }
+}
+
+
+bool MemoryAllocator::CommitBlock(Address start,
+                                  size_t size,
+                                  Executability executable) {
+  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  return true;
+}
+
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  if (!VirtualMemory::UncommitRegion(start, size)) return false;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
+}
+
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
 }
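
ZapBlock fills memory with a recognizable pattern so that a debug build crashes loudly if anything dereferences uninitialized or freed slots. A sketch of the same technique; kZapPattern is an illustrative stand-in for V8's kZapValue:

    #include <stddef.h>
    #include <stdint.h>

    static const uintptr_t kZapPattern = 0xdeadbeef;  // stand-in value

    // Overwrite every pointer-sized slot in [start, start + size) so a
    // later stale read produces an obviously bogus pointer.
    void ZapRegion(void* start, size_t size) {
      uintptr_t* p = static_cast<uintptr_t*>(start);
      for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t)) {
        *p++ = kZapPattern;
      }
    }

    int main() {
      char buffer[64];
      ZapRegion(buffer, sizeof(buffer));
      return 0;
    }
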
 
 
@@ -465,269 +679,6 @@
   UNREACHABLE();
 }
 
-void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
-  ASSERT(initial_chunk_ == NULL);
-
-  initial_chunk_ = new VirtualMemory(requested);
-  CHECK(initial_chunk_ != NULL);
-  if (!initial_chunk_->IsReserved()) {
-    delete initial_chunk_;
-    initial_chunk_ = NULL;
-    return NULL;
-  }
-
-  // We are sure that we have mapped a block of requested addresses.
-  ASSERT(initial_chunk_->size() == requested);
-  LOG(isolate_,
-      NewEvent("InitialChunk", initial_chunk_->address(), requested));
-  size_ += static_cast<int>(requested);
-  return initial_chunk_->address();
-}
-
-
-static int PagesInChunk(Address start, size_t size) {
-  // The first page starts on the first page-aligned address from start onward
-  // and the last page ends on the last page-aligned address before
-  // start+size.  Page::kPageSize is a power of two so we can divide by
-  // shifting.
-  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
-      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
-}
-
-
-Page* MemoryAllocator::AllocatePages(int requested_pages,
-                                     int* allocated_pages,
-                                     PagedSpace* owner) {
-  if (requested_pages <= 0) return Page::FromAddress(NULL);
-  size_t chunk_size = requested_pages * Page::kPageSize;
-
-  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
-  if (chunk == NULL) return Page::FromAddress(NULL);
-  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
-
-  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
-
-  // We may 'lose' a page due to alignment.
-  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
-
-  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
-
-  // Check that we got at least one page that we can use.
-  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
-    FreeRawMemory(chunk,
-                  chunk_size,
-                  owner->executable());
-    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
-    return Page::FromAddress(NULL);
-  }
-
-  if (guard_size != 0) {
-    OS::Guard(chunk, guard_size);
-    chunk_size -= guard_size;
-    chunk = static_cast<Address>(chunk) + guard_size;
-    --*allocated_pages;
-  }
-
-  int chunk_id = Pop();
-  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
-
-  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
-
-  return new_pages;
-}
-
-
-Page* MemoryAllocator::CommitPages(Address start, size_t size,
-                                   PagedSpace* owner, int* num_pages) {
-  ASSERT(start != NULL);
-  *num_pages = PagesInChunk(start, size);
-  ASSERT(*num_pages > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
-    return Page::FromAddress(NULL);
-  }
-#ifdef DEBUG
-  ZapBlock(start, size);
-#endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-
-  // So long as we correctly overestimated the number of chunks we should not
-  // run out of chunk ids.
-  CHECK(!OutOfChunkIds());
-  int chunk_id = Pop();
-  chunks_[chunk_id].init(start, size, owner);
-  return InitializePagesInChunk(chunk_id, *num_pages, owner);
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
-                                  size_t size,
-                                  Executability executable) {
-  ASSERT(start != NULL);
-  ASSERT(size > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-
-  if (!initial_chunk_->Commit(start, size, executable)) return false;
-#ifdef DEBUG
-  ZapBlock(start, size);
-#endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-  return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  ASSERT(start != NULL);
-  ASSERT(size > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-
-  if (!initial_chunk_->Uncommit(start, size)) return false;
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
-  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-    Memory::Address_at(start + s) = kZapValue;
-  }
-}
-
-
-Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                                              PagedSpace* owner) {
-  ASSERT(IsValidChunk(chunk_id));
-  ASSERT(pages_in_chunk > 0);
-
-  Address chunk_start = chunks_[chunk_id].address();
-
-  Address low = RoundUp(chunk_start, Page::kPageSize);
-
-#ifdef DEBUG
-  size_t chunk_size = chunks_[chunk_id].size();
-  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-  ASSERT(pages_in_chunk <=
-        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
-#endif
-
-  Address page_addr = low;
-  for (int i = 0; i < pages_in_chunk; i++) {
-    Page* p = Page::FromAddress(page_addr);
-    p->heap_ = owner->heap();
-    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->InvalidateWatermark(true);
-    p->SetIsLargeObjectPage(false);
-    p->SetAllocationWatermark(p->ObjectAreaStart());
-    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
-    page_addr += Page::kPageSize;
-  }
-
-  // Set the next page of the last page to 0.
-  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-  last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-  return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FreePages(Page* p) {
-  if (!p->is_valid()) return p;
-
-  // Find the first page in the same chunk as 'p'
-  Page* first_page = FindFirstPageInSameChunk(p);
-  Page* page_to_return = Page::FromAddress(NULL);
-
-  if (p != first_page) {
-    // Find the last page in the same chunk as 'prev'.
-    Page* last_page = FindLastPageInSameChunk(p);
-    first_page = GetNextPage(last_page);  // first page in next chunk
-
-    // set the next_page of last_page to NULL
-    SetNextPage(last_page, Page::FromAddress(NULL));
-    page_to_return = p;  // return 'p' when exiting
-  }
-
-  while (first_page->is_valid()) {
-    int chunk_id = GetChunkId(first_page);
-    ASSERT(IsValidChunk(chunk_id));
-
-    // Find the first page of the next chunk before deleting this chunk.
-    first_page = GetNextPage(FindLastPageInSameChunk(first_page));
-
-    // Free the current chunk.
-    DeleteChunk(chunk_id);
-  }
-
-  return page_to_return;
-}
-
-
-void MemoryAllocator::FreeAllPages(PagedSpace* space) {
-  for (int i = 0, length = chunks_.length(); i < length; i++) {
-    if (chunks_[i].owner() == space) {
-      DeleteChunk(i);
-    }
-  }
-}
-
-
-void MemoryAllocator::DeleteChunk(int chunk_id) {
-  ASSERT(IsValidChunk(chunk_id));
-
-  ChunkInfo& c = chunks_[chunk_id];
-
-  // We cannot free a chunk contained in the initial chunk because it was not
-  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
-  // memory.
-  if (InInitialChunk(c.address())) {
-    // TODO(1240712): VirtualMemory::Uncommit has a return value which
-    // is ignored here.
-    initial_chunk_->Uncommit(c.address(), c.size());
-    Counters* counters = isolate_->counters();
-    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
-  } else {
-    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
-    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
-    size_t size = c.size();
-    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
-    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
-    PerformAllocationCallback(space, kAllocationActionFree, size);
-  }
-  c.init(NULL, 0, NULL);
-  Push(chunk_id);
-}
-
-
-Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
-  int chunk_id = GetChunkId(p);
-  ASSERT(IsValidChunk(chunk_id));
-
-  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
-  return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
-  int chunk_id = GetChunkId(p);
-  ASSERT(IsValidChunk(chunk_id));
-
-  Address chunk_start = chunks_[chunk_id].address();
-  size_t chunk_size = chunks_[chunk_id].size();
-
-  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-  ASSERT(chunk_start <= p->address() && p->address() < high);
-
-  return Page::FromAddress(high - Page::kPageSize);
-}
-
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
@@ -740,71 +691,61 @@
 #endif
 
 
-void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
-                                                 Page** first_page,
-                                                 Page** last_page,
-                                                 Page** last_page_in_use) {
-  Page* first = NULL;
-  Page* last = NULL;
-
-  for (int i = 0, length = chunks_.length(); i < length; i++) {
-    ChunkInfo& chunk = chunks_[i];
-
-    if (chunk.owner() == space) {
-      if (first == NULL) {
-        Address low = RoundUp(chunk.address(), Page::kPageSize);
-        first = Page::FromAddress(low);
-      }
-      last = RelinkPagesInChunk(i,
-                                chunk.address(),
-                                chunk.size(),
-                                last,
-                                last_page_in_use);
-    }
-  }
-
-  if (first_page != NULL) {
-    *first_page = first;
-  }
-
-  if (last_page != NULL) {
-    *last_page = last;
-  }
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
 }
 
 
-Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
-                                          Address chunk_start,
-                                          size_t chunk_size,
-                                          Page* prev,
-                                          Page** last_page_in_use) {
-  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
-  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+int MemoryAllocator::CodePageGuardSize() {
+  return OS::CommitPageSize();
+}
 
-  if (prev->is_valid()) {
-    SetNextPage(prev, Page::FromAddress(page_addr));
+
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the executable area starts only after
+  // the header and the guard page that follows it.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - OS::CommitPageSize();
+}
+
+
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start,
+                  CodePageGuardStartOffset(),
+                  false)) {
+    return false;
   }
 
-  for (int i = 0; i < pages_in_chunk; i++) {
-    Page* p = Page::FromAddress(page_addr);
-    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    page_addr += Page::kPageSize;
-
-    p->InvalidateWatermark(true);
-    if (p->WasInUseBeforeMC()) {
-      *last_page_in_use = p;
-    }
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
   }
 
-  // Set the next page of the last page to 0.
-  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-  last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-  if (last_page->WasInUseBeforeMC()) {
-    *last_page_in_use = last_page;
+  // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  area_size,
+                  true)) {
+    return false;
   }
 
-  return last_page;
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+    return false;
+  }
+
+  return true;
 }
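
CommitCodePage commits an executable chunk as four pieces: a writable header, a guard page, the executable body, and a trailing guard page. A small program reproducing the offset arithmetic under assumed constants (a 4 KB commit page and a made-up header size, not V8's actual values):

    #include <assert.h>
    #include <stddef.h>

    static const size_t kCommitPageSize = 4096;  // assumed OS granularity
    static const size_t kHeaderSize = 1024;      // stand-in for the header

    size_t RoundUpSize(size_t x, size_t a) { return (x + a - 1) / a * a; }

    int main() {
      // Mirrors CodePageGuardStartOffset / CodePageGuardSize /
      // CodePageAreaStartOffset above.
      size_t guard_start = RoundUpSize(kHeaderSize, kCommitPageSize);
      size_t guard_size = kCommitPageSize;
      size_t area_start = guard_start + guard_size;

      // For a given total size, the executable body is whatever remains
      // after the header-plus-guard prefix and the trailing guard page.
      size_t size = 1 << 20;  // e.g. a 1 MB code chunk
      size_t area_size = size - area_start - guard_size;

      // The four pieces must tile the chunk exactly.
      assert(guard_start + guard_size + area_size + guard_size == size);
      return 0;
    }
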
 
 
@@ -815,296 +756,168 @@
                        intptr_t max_capacity,
                        AllocationSpace id,
                        Executability executable)
-    : Space(heap, id, executable) {
+    : Space(heap, id, executable),
+      free_list_(this),
+      was_swept_conservatively_(false),
+      first_unswept_page_(Page::FromAddress(NULL)) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->
+        CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                  * Page::kObjectAreaSize;
+      * AreaSize();
   accounting_stats_.Clear();
 
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
 
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
+  anchor_.InitializeAsAnchor(this);
 }
 
 
-bool PagedSpace::Setup(Address start, size_t size) {
-  if (HasBeenSetup()) return false;
-
-  int num_pages = 0;
-  // Try to use the virtual memory range passed to us.  If it is too small to
-  // contain at least one page, ignore it and allocate instead.
-  int pages_in_chunk = PagesInChunk(start, size);
-  if (pages_in_chunk > 0) {
-    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
-        RoundUp(start, Page::kPageSize),
-        Page::kPageSize * pages_in_chunk,
-        this, &num_pages);
-  } else {
-    int requested_pages =
-        Min(MemoryAllocator::kPagesPerChunk,
-            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
-    first_page_ =
-        Isolate::Current()->memory_allocator()->AllocatePages(
-            requested_pages, &num_pages, this);
-    if (!first_page_->is_valid()) return false;
-  }
-
-  // We are sure that the first page is valid and that we have at least one
-  // page.
-  ASSERT(first_page_->is_valid());
-  ASSERT(num_pages > 0);
-  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
-  ASSERT(Capacity() <= max_capacity_);
-
-  // Sequentially clear region marks in the newly allocated
-  // pages and cache the current last page in the space.
-  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    last_page_ = p;
-  }
-
-  // Use first_page_ for allocation.
-  SetAllocationInfo(&allocation_info_, first_page_);
-
-  page_list_is_chunk_ordered_ = true;
-
+bool PagedSpace::Setup() {
   return true;
 }
 
 
 bool PagedSpace::HasBeenSetup() {
-  return (Capacity() > 0);
+  return true;
 }
 
 
 void PagedSpace::TearDown() {
-  Isolate::Current()->memory_allocator()->FreeAllPages(this);
-  first_page_ = NULL;
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    heap()->isolate()->memory_allocator()->Free(iterator.next());
+  }
+  anchor_.set_next_page(&anchor_);
+  anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }
 
 
-void PagedSpace::MarkAllPagesClean() {
-  PageIterator it(this, PageIterator::ALL_PAGES);
-  while (it.has_next()) {
-    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-  }
-}
-
-
 MaybeObject* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called before or after mark-compact GC
-  // because it accesses map pointers.
+  // Note: this function can only be called on precisely swept spaces.
   ASSERT(!heap()->mark_compact_collector()->in_use());
 
   if (!Contains(addr)) return Failure::Exception();
 
   Page* p = Page::FromAddress(addr);
-  ASSERT(IsUsed(p));
-  Address cur = p->ObjectAreaStart();
-  Address end = p->AllocationTop();
-  while (cur < end) {
-    HeapObject* obj = HeapObject::FromAddress(cur);
+  HeapObjectIterator it(p, NULL);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    Address cur = obj->address();
     Address next = cur + obj->Size();
     if ((cur <= addr) && (addr < next)) return obj;
-    cur = next;
   }
 
   UNREACHABLE();
   return Failure::Exception();
 }
 
-
-bool PagedSpace::IsUsed(Page* page) {
-  PageIterator it(this, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    if (page == it.next()) return true;
-  }
-  return false;
-}
-
-
-void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
-  alloc_info->top = p->ObjectAreaStart();
-  alloc_info->limit = p->ObjectAreaEnd();
-  ASSERT(alloc_info->VerifyPagedAllocation());
-}
-
-
-void PagedSpace::MCResetRelocationInfo() {
-  // Set page indexes.
-  int i = 0;
-  PageIterator it(this, PageIterator::ALL_PAGES);
-  while (it.has_next()) {
-    Page* p = it.next();
-    p->mc_page_index = i++;
-  }
-
-  // Set mc_forwarding_info_ to the first page in the space.
-  SetAllocationInfo(&mc_forwarding_info_, first_page_);
-  // All the bytes in the space are 'available'.  We will rediscover
-  // allocated and wasted bytes during GC.
-  accounting_stats_.Reset();
-}
-
-
-int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
-#ifdef DEBUG
-  // The Contains function considers the address at the beginning of a
-  // page in the page, MCSpaceOffsetForAddress considers it is in the
-  // previous page.
-  if (Page::IsAlignedToPageSize(addr)) {
-    ASSERT(Contains(addr - kPointerSize));
-  } else {
-    ASSERT(Contains(addr));
-  }
-#endif
-
-  // If addr is at the end of a page, it belongs to previous page
-  Page* p = Page::IsAlignedToPageSize(addr)
-            ? Page::FromAllocationTop(addr)
-            : Page::FromAddress(addr);
-  int index = p->mc_page_index;
-  return (index * Page::kPageSize) + p->Offset(addr);
-}
-
-
-// Slow case for reallocating and promoting objects during a compacting
-// collection.  This function is not space-specific.
-HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
-  Page* current_page = TopPageOf(mc_forwarding_info_);
-  if (!current_page->next_page()->is_valid()) {
-    if (!Expand(current_page)) {
-      return NULL;
-    }
-  }
-
-  // There are surely more pages in the space now.
-  ASSERT(current_page->next_page()->is_valid());
-  // We do not add the top of page block for current page to the space's
-  // free list---the block may contain live objects so we cannot write
-  // bookkeeping information to it.  Instead, we will recover top of page
-  // blocks when we move objects to their new locations.
-  //
-  // We do however write the allocation pointer to the page.  The encoding
-  // of forwarding addresses is as an offset in terms of live bytes, so we
-  // need quick access to the allocation top of each page to decode
-  // forwarding addresses.
-  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
-  current_page->next_page()->InvalidateWatermark(true);
-  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
-  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-}
-
-
-bool PagedSpace::Expand(Page* last_page) {
-  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+bool PagedSpace::CanExpand() {
+  ASSERT(max_capacity_ % AreaSize() == 0);
+  ASSERT(Capacity() % AreaSize() == 0);
 
   if (Capacity() == max_capacity_) return false;
 
   ASSERT(Capacity() < max_capacity_);
-  // Last page must be valid and its next page is invalid.
-  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
 
-  int available_pages =
-      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-  // We don't want to have to handle small chunks near the end so if there are
-  // not kPagesPerChunk pages available without exceeding the max capacity then
-  // act as if memory has run out.
-  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
+  // Are we going to exceed capacity for this space?
+  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
 
-  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
-      desired_pages, &desired_pages, this);
-  if (!p->is_valid()) return false;
+  return true;
+}
 
-  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
+
+bool PagedSpace::Expand() {
+  if (!CanExpand()) return false;
+
+  Page* p = heap()->isolate()->memory_allocator()->
+      AllocatePage(this, executable());
+  if (p == NULL) return false;
+
   ASSERT(Capacity() <= max_capacity_);
 
-  heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
-
-  // Sequentially clear region marks of new pages and cache the
-  // new last page in the space.
-  while (p->is_valid()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    last_page_ = p;
-    p = p->next_page();
-  }
+  p->InsertAfter(anchor_.prev_page());
 
   return true;
 }
 
 
-#ifdef DEBUG
 int PagedSpace::CountTotalPages() {
+  PageIterator it(this);
   int count = 0;
-  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+  while (it.has_next()) {
+    it.next();
     count++;
   }
   return count;
 }
-#endif
 
 
-void PagedSpace::Shrink() {
-  if (!page_list_is_chunk_ordered_) {
-    // We can't shrink space if pages is not chunk-ordered
-    // (see comment for class MemoryAllocator for definition).
-    return;
+void PagedSpace::ReleasePage(Page* page) {
+  ASSERT(page->LiveBytes() == 0);
+  ASSERT(AreaSize() == page->area_size());
+
+  // Adjust list of unswept pages if the page is the head of the list.
+  if (first_unswept_page_ == page) {
+    first_unswept_page_ = page->next_page();
+    if (first_unswept_page_ == anchor()) {
+      first_unswept_page_ = Page::FromAddress(NULL);
+    }
   }
 
-  // Release half of free pages.
-  Page* top_page = AllocationTopPage();
-  ASSERT(top_page->is_valid());
-
-  // Count the number of pages we would like to free.
-  int pages_to_free = 0;
-  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-    pages_to_free++;
+  if (page->WasSwept()) {
+    intptr_t size = free_list_.EvictFreeListItems(page);
+    accounting_stats_.AllocateBytes(size);
+    ASSERT_EQ(AreaSize(), static_cast<int>(size));
   }
 
-  // Free pages after top_page.
-  Page* p = heap()->isolate()->memory_allocator()->
-      FreePages(top_page->next_page());
-  heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
-
-  // Find out how many pages we failed to free and update last_page_.
-  // Please note pages can only be freed in whole chunks.
-  last_page_ = top_page;
-  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-    pages_to_free--;
-    last_page_ = p;
+  if (Page::FromAllocationTop(allocation_info_.top) == page) {
+    allocation_info_.top = allocation_info_.limit = NULL;
   }
 
-  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
-  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    heap()->QueueMemoryChunkForFree(page);
+  }
+
+  ASSERT(Capacity() > 0);
+  ASSERT(Capacity() % AreaSize() == 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 
-bool PagedSpace::EnsureCapacity(int capacity) {
-  if (Capacity() >= capacity) return true;
-
-  // Start from the allocation top and loop to the last page in the space.
-  Page* last_page = AllocationTopPage();
-  Page* next_page = last_page->next_page();
-  while (next_page->is_valid()) {
-    last_page = heap()->isolate()->memory_allocator()->
-        FindLastPageInSameChunk(next_page);
-    next_page = last_page->next_page();
+void PagedSpace::ReleaseAllUnusedPages() {
+  PageIterator it(this);
+  while (it.has_next()) {
+    Page* page = it.next();
+    if (!page->WasSwept()) {
+      if (page->LiveBytes() == 0) ReleasePage(page);
+    } else {
+      HeapObject* obj = HeapObject::FromAddress(page->area_start());
+      if (obj->IsFreeSpace() &&
+          FreeSpace::cast(obj)->size() == AreaSize()) {
+        // Sometimes we allocate memory from a free list but don't
+        // immediately initialize it (e.g. see PagedSpace::ReserveSpace,
+        // called from Heap::ReserveSpace, which can cause a GC before the
+        // reserved space is actually initialized).
+        // Thus we can't simply assume that obj represents a valid
+        // node still owned by a free list.
+        // Instead we verify that the page is fully covered by
+        // free-list items.
+        FreeList::SizeStats sizes;
+        free_list_.CountFreeListItems(page, &sizes);
+        if (sizes.Total() == AreaSize()) {
+          ReleasePage(page);
+        }
+      }
+    }
   }
-
-  // Expand the space until it has the required capacity or expansion fails.
-  do {
-    if (!Expand(last_page)) return false;
-    ASSERT(last_page->next_page()->is_valid());
-    last_page =
-        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
-            last_page->next_page());
-  } while (Capacity() < capacity);
-
-  return true;
+  heap()->FreeQueuedChunks();
 }
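
The coverage check described in the comment above amounts to summing the sizes of free-list nodes that live on the candidate page and comparing the total against the page's usable area. A simplified sketch of that idea; FreeNode and the flat list layout are illustrative, not V8's FreeList:

    #include <stddef.h>
    #include <stdint.h>

    struct FreeNode {
      FreeNode* next;
      size_t size;
    };

    // Sum the bytes of free-list nodes residing on the page. The page can
    // be released only if the total equals the whole usable area, i.e. no
    // live object hides between the free blocks.
    size_t FreeBytesOnPage(const FreeNode* head,
                           uintptr_t page_start,
                           size_t area_size) {
      size_t total = 0;
      for (const FreeNode* n = head; n != NULL; n = n->next) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(n);
        if (addr >= page_start && addr < page_start + area_size) {
          total += n->size;
        }
      }
      return total;
    }

    int main() {
      FreeNode nodes[2] = { { &nodes[1], 16 }, { NULL, 16 } };
      uintptr_t page = reinterpret_cast<uintptr_t>(&nodes[0]);
      return FreeBytesOnPage(&nodes[0], page, sizeof(nodes)) == 32 ? 0 : 1;
    }
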
 
 
@@ -1114,61 +927,52 @@
 
 
 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
+  // We can only iterate over the pages if they were swept precisely.
+  if (was_swept_conservatively_) return;
 
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(heap()->map_space()->Contains(map));
-
-        // Perform space-specific object verification.
-        VerifyObject(object);
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and
-        // have page regions covering intergenerational references should be
-        // marked dirty.
-        int size = object->Size();
-        object->IterateBody(map->instance_type(), size, visitor);
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
+  bool allocation_pointer_found_in_space =
+      (allocation_info_.top == allocation_info_.limit);
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    ASSERT(page->owner() == this);
+    if (page == Page::FromAllocationTop(allocation_info_.top)) {
+      allocation_pointer_found_in_space = true;
     }
+    ASSERT(page->WasSweptPrecisely());
+    HeapObjectIterator it(page, NULL);
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
+    int black_size = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      ASSERT(end_of_previous_object <= object->address());
 
-    current_page = current_page->next_page();
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      ASSERT(map->IsMap());
+      ASSERT(heap()->map_space()->Contains(map));
+
+      // Perform space-specific object verification.
+      VerifyObject(object);
+
+      // The object itself should look OK.
+      object->Verify();
+
+      // All the interior pointers should be contained in the heap.
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, visitor);
+      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+        black_size += size;
+      }
+
+      ASSERT(object->address() + size <= top);
+      end_of_previous_object = object->address() + size;
+    }
+    ASSERT_LE(black_size, page->LiveBytes());
   }
+  ASSERT(allocation_pointer_found_in_space);
 }
 #endif
 
@@ -1177,13 +981,23 @@
 // NewSpace implementation
 
 
-bool NewSpace::Setup(Address start, int size) {
+bool NewSpace::Setup(int reserved_semispace_capacity,
+                     int maximum_semispace_capacity) {
   // Setup new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
+
+  size_t size = 2 * reserved_semispace_capacity;
+  Address base =
+      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+          size, size, &reservation_);
+  if (base == NULL) return false;
+
+  chunk_base_ = base;
+  chunk_size_ = static_cast<uintptr_t>(size);
+  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -1197,31 +1011,29 @@
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
-  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
-  ASSERT(IsAddressAligned(start, size, 0));
+  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
+  ASSERT(static_cast<intptr_t>(chunk_size_) >=
+         2 * heap()->ReservedSemiSpaceSize());
+  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
 
-  if (!to_space_.Setup(start,
+  if (!to_space_.Setup(chunk_base_,
                        initial_semispace_capacity,
                        maximum_semispace_capacity)) {
     return false;
   }
-  if (!from_space_.Setup(start + maximum_semispace_capacity,
+  if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
                          initial_semispace_capacity,
                          maximum_semispace_capacity)) {
     return false;
   }
 
-  start_ = start;
-  address_mask_ = ~(size - 1);
+  start_ = chunk_base_;
+  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
 
-  allocation_info_.top = to_space_.low();
-  allocation_info_.limit = to_space_.high();
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
+  ResetAllocationInfo();
 
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   return true;
 }
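
The power-of-two size and alignment demanded above exist so that new-space containment can be tested with one AND and one compare instead of two bounds checks; address_mask_ and object_expected_ are precomputed for exactly this. A sketch with assumed constants:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uintptr_t kSpaceSize = 1 << 22;          // assumed, power of two
      const uintptr_t kSpaceStart = 4 * kSpaceSize;  // aligned to its size
      const uintptr_t kAddressMask = ~(kSpaceSize - 1);

      // An address is in the space iff masking off the offset bits
      // yields the space's start address.
      uintptr_t inside = kSpaceStart + 12345;
      uintptr_t outside = kSpaceStart + kSpaceSize + 1;
      assert((inside & kAddressMask) == kSpaceStart);
      assert((outside & kAddressMask) != kSpaceStart);
      return 0;
    }
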
 
@@ -1239,28 +1051,34 @@
   start_ = NULL;
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
 
   to_space_.TearDown();
   from_space_.TearDown();
+
+  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+
+  ASSERT(reservation_.IsReserved());
+  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+                                                    NOT_EXECUTABLE);
+  chunk_base_ = NULL;
+  chunk_size_ = 0;
 }
 
 
 void NewSpace::Flip() {
-  SemiSpace tmp = from_space_;
-  from_space_ = to_space_;
-  to_space_ = tmp;
+  SemiSpace::Swap(&from_space_, &to_space_);
 }
 
 
 void NewSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
   ASSERT(Capacity() < MaximumCapacity());
-  if (to_space_.Grow()) {
-    // Only grow from space if we managed to grow to space.
-    if (!from_space_.Grow()) {
-      // If we managed to grow to space but couldn't grow from space,
-      // attempt to shrink to space.
+  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
+  if (to_space_.GrowTo(new_capacity)) {
+    // Only grow from space if we managed to grow to-space.
+    if (!from_space_.GrowTo(new_capacity)) {
+      // If we managed to grow to-space but couldn't grow from-space,
+      // attempt to shrink to-space.
       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1268,21 +1086,20 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.high();
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
-  int rounded_new_capacity =
-      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
+  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < Capacity() &&
       to_space_.ShrinkTo(rounded_new_capacity))  {
-    // Only shrink from space if we managed to shrink to space.
+    // Only shrink from-space if we managed to shrink to-space.
+    from_space_.Reset();
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
-      // If we managed to shrink to space but couldn't shrink from
-      // space, attempt to grow to space again.
+      // If we managed to shrink to-space but couldn't shrink from-space,
+      // attempt to grow to-space again.
       if (!to_space_.GrowTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1290,36 +1107,98 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.high();
+  allocation_info_.limit = to_space_.page_high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::UpdateAllocationInfo() {
+  allocation_info_.top = to_space_.page_low();
+  allocation_info_.limit = to_space_.page_high();
+
+  // Lower limit during incremental marking.
+  if (heap()->incremental_marking()->IsMarking() &&
+      inline_allocation_limit_step() != 0) {
+    Address new_limit =
+        allocation_info_.top + inline_allocation_limit_step();
+    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+  }
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
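
Lowering the limit below the true end of the page forces allocation onto the slow path at regular intervals, which is how the incremental marker gets to run a step proportional to the bytes allocated in between. A toy model of that pacing (the step and page sizes are made up):

    #include <stdio.h>

    static int Min(int a, int b) { return a < b ? a : b; }

    int main() {
      const int kPageHigh = 4096;  // assumed end of the current page
      const int kStep = 1024;      // assumed inline allocation limit step

      int top = 0;
      int limit = Min(top + kStep, kPageHigh);

      // Bump-pointer allocation; whenever top would cross the artificial
      // limit, the "marker" runs and the limit is pushed out by a step.
      for (int i = 0; i < 12; i++) {
        const int size = 400;
        if (top + size > kPageHigh) break;  // page exhausted
        if (top + size > limit) {
          printf("marking step after %d bytes allocated\n", top);
          limit = Min(limit + kStep, kPageHigh);
        }
        top += size;
      }
      return 0;
    }
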
 
 
 void NewSpace::ResetAllocationInfo() {
-  allocation_info_.top = to_space_.low();
-  allocation_info_.limit = to_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+  to_space_.Reset();
+  UpdateAllocationInfo();
+  pages_used_ = 0;
+  // Clear all mark-bits in the to-space.
+  NewSpacePageIterator it(&to_space_);
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
 }
 
 
-void NewSpace::MCResetRelocationInfo() {
-  mc_forwarding_info_.top = from_space_.low();
-  mc_forwarding_info_.limit = from_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
+bool NewSpace::AddFreshPage() {
+  Address top = allocation_info_.top;
+  if (NewSpacePage::IsAtStart(top)) {
+    // The current page is already empty. Don't try to make another.
+
+    // We should only get here if someone asks to allocate more
+    // than what can be stored in a single page.
+    // TODO(gc): Change the limit on new-space allocation to prevent this
+    // from happening (all such allocations should go directly to LOSpace).
+    return false;
+  }
+  if (!to_space_.AdvancePage()) {
+    // Failed to get a new page in to-space.
+    return false;
+  }
+
+  // Clear remainder of current page.
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  if (heap()->gc_state() == Heap::SCAVENGE) {
+    heap()->promotion_queue()->SetNewLimit(limit);
+    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
+  }
+
+  int remaining_in_page = static_cast<int>(limit - top);
+  heap()->CreateFillerObjectAt(top, remaining_in_page);
+  pages_used_++;
+  UpdateAllocationInfo();
+
+  return true;
 }
 
 
-void NewSpace::MCCommitRelocationInfo() {
-  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
-  // valid allocation info for the to space.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = to_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top;
+  Address new_top = old_top + size_in_bytes;
+  Address high = to_space_.page_high();
+  if (allocation_info_.limit < high) {
+    // Incremental marking has lowered the limit to get a
+    // chance to do a step.
+    allocation_info_.limit = Min(
+        allocation_info_.limit + inline_allocation_limit_step_,
+        high);
+    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated);
+    top_on_previous_step_ = new_top;
+    return AllocateRaw(size_in_bytes);
+  } else if (AddFreshPage()) {
+    // Switched to new page. Try allocating again.
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated);
+    top_on_previous_step_ = to_space_.page_low();
+    return AllocateRaw(size_in_bytes);
+  } else {
+    return Failure::RetryAfterGC();
+  }
 }
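
SlowAllocateRaw is the fallback behind the inline bump-pointer fast path: ordinary allocation just advances top, and control only lands here when the (possibly artificially lowered) limit is hit or the page is full. A condensed model of the two-level scheme (names are illustrative, and the slow path here simply fails rather than pulling a fresh page):

    #include <stddef.h>

    struct BumpAllocator {
      char* top;
      char* limit;

      // Slow path: in V8 this is where marking steps run and fresh pages
      // are requested; this sketch just reports failure.
      void* AllocateSlow(size_t) { return NULL; }

      // Fast path: advance the top pointer while it stays under limit.
      void* Allocate(size_t size) {
        if (top + size <= limit) {
          void* result = top;
          top += size;
          return result;
        }
        return AllocateSlow(size);
      }
    };

    int main() {
      char page[4096];
      BumpAllocator alloc = { page, page + sizeof(page) };
      void* a = alloc.Allocate(64);
      void* b = alloc.Allocate(128);
      return (a != NULL && b != NULL) ? 0 : 1;
    }
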
 
 
 #ifdef DEBUG
-// We do not use the SemispaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
 void NewSpace::Verify() {
   // The allocation pointer should be in the space or at the very end.
@@ -1327,59 +1206,53 @@
 
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.low();
-  while (current < top()) {
-    HeapObject* object = HeapObject::FromAddress(current);
+  Address current = to_space_.first_page()->area_start();
+  CHECK_EQ(current, to_space_.space_start());
 
-    // The first word should be a map, and we expect all map pointers to
-    // be in map space.
-    Map* map = object->map();
-    ASSERT(map->IsMap());
-    ASSERT(heap()->map_space()->Contains(map));
+  while (current != top()) {
+    if (!NewSpacePage::IsAtEnd(current)) {
+      // The allocation pointer should not be in the middle of an object.
+      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+            current < top());
 
-    // The object should not be code or a map.
-    ASSERT(!object->IsMap());
-    ASSERT(!object->IsCode());
+      HeapObject* object = HeapObject::FromAddress(current);
 
-    // The object itself should look OK.
-    object->Verify();
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
 
-    // All the interior pointers should be contained in the heap.
-    VerifyPointersVisitor visitor;
-    int size = object->Size();
-    object->IterateBody(map->instance_type(), size, &visitor);
+      // The object should not be code or a map.
+      CHECK(!object->IsMap());
+      CHECK(!object->IsCode());
 
-    current += size;
+      // The object itself should look OK.
+      object->Verify();
+
+      // All the interior pointers should be contained in the heap.
+      VerifyPointersVisitor visitor;
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, &visitor);
+
+      current += size;
+    } else {
+      // At end of page, switch to next page.
+      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      // Next page should be valid.
+      CHECK(!page->is_anchor());
+      current = page->area_start();
+    }
   }
 
-  // The allocation pointer should not be in the middle of an object.
-  ASSERT(current == top());
+  // Check semi-spaces.
+  ASSERT_EQ(from_space_.id(), kFromSpace);
+  ASSERT_EQ(to_space_.id(), kToSpace);
+  from_space_.Verify();
+  to_space_.Verify();
 }
 #endif
 
-
-bool SemiSpace::Commit() {
-  ASSERT(!is_committed());
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start_, capacity_, executable())) {
-    return false;
-  }
-  committed_ = true;
-  return true;
-}
-
-
-bool SemiSpace::Uncommit() {
-  ASSERT(is_committed());
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-      start_, capacity_)) {
-    return false;
-  }
-  committed_ = false;
-  return true;
-}
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
@@ -1392,11 +1265,11 @@
   // otherwise.  In the mark-compact collector, the memory region of the from
   // space is used as the marking stack. It requires contiguous memory
   // addresses.
-  initial_capacity_ = initial_capacity;
+  ASSERT(maximum_capacity >= Page::kPageSize);
+  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   capacity_ = initial_capacity;
-  maximum_capacity_ = maximum_capacity;
+  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
-
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
@@ -1413,81 +1286,258 @@
 }
 
 
-bool SemiSpace::Grow() {
-  // Double the semispace size but only up to maximum capacity.
-  int maximum_extra = maximum_capacity_ - capacity_;
-  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
-                  maximum_extra);
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      high(), extra, executable())) {
+bool SemiSpace::Commit() {
+  ASSERT(!is_committed());
+  int pages = capacity_ / Page::kPageSize;
+  Address end = start_ + maximum_capacity_;
+  Address start = end - pages * Page::kPageSize;
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+                                                          capacity_,
+                                                          executable())) {
     return false;
   }
-  capacity_ += extra;
+
+  NewSpacePage* page = anchor();
+  for (int i = 1; i <= pages; i++) {
+    NewSpacePage* new_page =
+      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
+    new_page->InsertAfter(page);
+    page = new_page;
+  }
+
+  committed_ = true;
+  Reset();
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  ASSERT(is_committed());
+  Address start = start_ + maximum_capacity_ - capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
+    return false;
+  }
+  anchor()->set_next_page(anchor());
+  anchor()->set_prev_page(anchor());
+
+  committed_ = false;
   return true;
 }
 
 
 bool SemiSpace::GrowTo(int new_capacity) {
+  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);
+  int pages_before = capacity_ / Page::kPageSize;
+  int pages_after = new_capacity / Page::kPageSize;
+
+  Address end = start_ + maximum_capacity_;
+  Address start = end - new_capacity;
   size_t delta = new_capacity - capacity_;
+
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      high(), delta, executable())) {
+      start, delta, executable())) {
     return false;
   }
   capacity_ = new_capacity;
+  NewSpacePage* last_page = anchor()->prev_page();
+  ASSERT(last_page != anchor());
+  for (int i = pages_before + 1; i <= pages_after; i++) {
+    Address page_address = end - i * Page::kPageSize;
+    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
+                                                      page_address,
+                                                      this);
+    new_page->InsertAfter(last_page);
+    Bitmap::Clear(new_page);
+    // Duplicate the flags that were set on the old page.
+    new_page->SetFlags(last_page->GetFlags(),
+                       NewSpacePage::kCopyOnFlipFlagsMask);
+    last_page = new_page;
+  }
   return true;
 }
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
+  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
+  // Semispaces grow backwards from the end of their allocated capacity,
+  // so we compute the start addresses before and after shrinking
+  // relative to the end of the space.
+  Address space_end = start_ + maximum_capacity_;
+  Address old_start = space_end - capacity_;
   size_t delta = capacity_ - new_capacity;
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-      high() - delta, delta)) {
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
     return false;
   }
   capacity_ = new_capacity;
+
+  int pages_after = capacity_ / Page::kPageSize;
+  NewSpacePage* new_last_page =
+      NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+  new_last_page->set_next_page(anchor());
+  anchor()->set_prev_page(new_last_page);
+  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+
   return true;
 }
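
Because a semispace is committed from the end of its reservation backwards, GrowTo commits the delta just below the currently committed block and ShrinkTo uncommits from the current start upward. A worked example of the address arithmetic (all numbers assumed):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uintptr_t kStart = 0x100000;       // reservation start, assumed
      const uintptr_t kMaxCapacity = 0x80000;  // maximum capacity, assumed
      const uintptr_t kEnd = kStart + kMaxCapacity;

      // Committed memory always occupies [kEnd - capacity, kEnd).
      uintptr_t capacity = 0x20000;
      uintptr_t committed_start = kEnd - capacity;

      // Growing commits the delta immediately below the old start...
      uintptr_t new_capacity = 0x40000;
      uintptr_t grow_start = kEnd - new_capacity;
      assert(grow_start + (new_capacity - capacity) == committed_start);

      // ...and shrinking back uncommits from that start upward.
      uintptr_t shrink_delta = new_capacity - capacity;
      assert(grow_start + shrink_delta == kEnd - capacity);
      return 0;
    }
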
 
 
+void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+  anchor_.set_owner(this);
+  // Fix up back-pointers to the anchor, whose address changes
+  // when we swap.
+  anchor_.prev_page()->set_next_page(&anchor_);
+  anchor_.next_page()->set_prev_page(&anchor_);
+
+  bool becomes_to_space = (id_ == kFromSpace);
+  id_ = becomes_to_space ? kToSpace : kFromSpace;
+  NewSpacePage* page = anchor_.next_page();
+  while (page != &anchor_) {
+    page->set_owner(this);
+    page->SetFlags(flags, mask);
+    if (becomes_to_space) {
+      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+      page->SetFlag(MemoryChunk::IN_TO_SPACE);
+      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+      page->ResetLiveBytes();
+    } else {
+      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
+      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+    }
+    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
+           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::Reset() {
+  ASSERT(anchor_.next_page() != &anchor_);
+  current_page_ = anchor_.next_page();
+}
+
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+  // We won't be swapping semispaces without data in them.
+  ASSERT(from->anchor_.next_page() != &from->anchor_);
+  ASSERT(to->anchor_.next_page() != &to->anchor_);
+
+  // Swap bits.
+  SemiSpace tmp = *from;
+  *from = *to;
+  *to = tmp;
+
+  // Fix up back-pointers to the page-list anchor now that its address
+  // has changed.
+  // Swap to/from-space bits on pages.
+  // Copy GC flags from old active space (from-space) to new (to-space).
+  intptr_t flags = from->current_page()->GetFlags();
+  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+
+  from->FlipPages(0, 0);
+}
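
Swap exchanges the two SemiSpace objects by value, which silently invalidates every page's back-pointer to the embedded anchor node (the anchor now lives at the other object's address); the FlipPages calls are what re-aim them. A reduced illustration of the pitfall and the fix (the types are illustrative):

    #include <assert.h>

    struct Ring {
      Ring* next;
      Ring* prev;
    };

    struct Space {
      Ring anchor;  // embedded sentinel: its address changes if Space moves
    };

    // After copying a Space by value, the old anchor's neighbors still
    // point at the old address; re-aim them at the new embedded anchor.
    void FixupAnchor(Space* s) {
      s->anchor.prev->next = &s->anchor;
      s->anchor.next->prev = &s->anchor;
    }

    int main() {
      Space a;
      Ring page;
      a.anchor.next = a.anchor.prev = &page;
      page.next = page.prev = &a.anchor;

      Space b = a;      // value copy: page still points into a
      FixupAnchor(&b);  // close the ring through b's anchor
      assert(page.next == &b.anchor && page.prev == &b.anchor);
      return 0;
    }
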
+
+
+void SemiSpace::set_age_mark(Address mark) {
+  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  age_mark_ = mark;
+  // Mark all pages up to the one containing mark.
+  NewSpacePageIterator it(space_start(), mark);
+  while (it.has_next()) {
+    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  }
+}
+
+
 #ifdef DEBUG
 void SemiSpace::Print() { }
 
 
-void SemiSpace::Verify() { }
+void SemiSpace::Verify() {
+  bool is_from_space = (id_ == kFromSpace);
+  NewSpacePage* page = anchor_.next_page();
+  CHECK(anchor_.semi_space() == this);
+  while (page != &anchor_) {
+    CHECK(page->semi_space() == this);
+    CHECK(page->InNewSpace());
+    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
+                                        : MemoryChunk::IN_TO_SPACE));
+    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
+                                         : MemoryChunk::IN_FROM_SPACE));
+    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+    if (!is_from_space) {
+      // The pointers-from-here-are-interesting flag isn't updated dynamically
+      // on from-space pages, so it might be out of sync with the marking state.
+      if (page->heap()->incremental_marking()->IsMarking()) {
+        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      } else {
+        CHECK(!page->IsFlagSet(
+            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      }
+      // TODO(gc): Check that the live_bytes_count_ field matches the
+      // black marking on the page (if we make it match in new-space).
+    }
+    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    CHECK(page->prev_page()->next_page() == page);
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::AssertValidRange(Address start, Address end) {
+  // Both addresses must belong to the same semi-space.
+  NewSpacePage* page = NewSpacePage::FromLimit(start);
+  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+  SemiSpace* space = page->semi_space();
+  CHECK_EQ(space, end_page->semi_space());
+  // The start address must not come after the end address: either both
+  // are on the same page, or the end address is on a later page in the
+  // semi-space's linked list of pages.
+  if (page == end_page) {
+    CHECK(start <= end);
+  } else {
+    while (page != end_page) {
+      page = page->next_page();
+      CHECK_NE(page, space->anchor());
+    }
+  }
+}
 #endif
 
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator implementation.
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-  Initialize(space, space->bottom(), space->top(), NULL);
+  Initialize(space->bottom(), space->top(), NULL);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                      HeapObjectCallback size_func) {
-  Initialize(space, space->bottom(), space->top(), size_func);
+  Initialize(space->bottom(), space->top(), size_func);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-  Initialize(space, start, space->top(), NULL);
+  Initialize(start, space->top(), NULL);
 }
 
 
-void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+  Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start,
                                    Address end,
                                    HeapObjectCallback size_func) {
-  ASSERT(space->ToSpaceContains(start));
-  ASSERT(space->ToSpaceLow() <= end
-         && end <= space->ToSpaceHigh());
-  space_ = &space->to_space_;
+  SemiSpace::AssertValidRange(start, end);
   current_ = start;
   limit_ = end;
   size_func_ = size_func;
@@ -1623,7 +1673,7 @@
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
     RecordAllocation(obj);
 }
 
@@ -1699,7 +1749,6 @@
   promoted_histogram_[type].increment_bytes(obj->Size());
 }
 
-
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
@@ -1708,493 +1757,439 @@
   ASSERT(IsAligned(size_in_bytes, kPointerSize));
 
   // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a ByteArray with at least one extra word (the next
-  // pointer), we set its map to be the byte array map and its size to an
+  // is big enough to be a FreeSpace with at least one extra word (the next
+  // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (e.g., one or two words) to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kHeaderSize) {
-    set_map(heap->raw_unchecked_byte_array_map());
-    // Can't use ByteArray::cast because it fails during deserialization.
-    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
-    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
+  if (size_in_bytes > FreeSpace::kHeaderSize) {
+    set_map_unsafe(heap->raw_unchecked_free_space_map());
+    // Can't use FreeSpace::cast because it fails during deserialization.
+    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+    this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
-    set_map(heap->raw_unchecked_one_pointer_filler_map());
+    set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map(heap->raw_unchecked_two_pointer_filler_map());
+    set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
-  // deserialization because the byte array map is not done yet.
+  // deserialization because the free space map is not done yet.
 }
 
 
-Address FreeListNode::next(Heap* heap) {
+FreeListNode* FreeListNode::next() {
   ASSERT(IsFreeListNode(this));
-  if (map() == heap->raw_unchecked_byte_array_map()) {
-    ASSERT(Size() >= kNextOffset + kPointerSize);
-    return Memory::Address_at(address() + kNextOffset);
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
+    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kNextOffset));
   } else {
-    return Memory::Address_at(address() + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kPointerSize));
   }
 }
 
 
-void FreeListNode::set_next(Heap* heap, Address next) {
+FreeListNode** FreeListNode::next_address() {
   ASSERT(IsFreeListNode(this));
-  if (map() == heap->raw_unchecked_byte_array_map()) {
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
-    Memory::Address_at(address() + kNextOffset) = next;
+    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
   } else {
-    Memory::Address_at(address() + kPointerSize) = next;
+    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
   }
 }
 
 
-OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
-  : heap_(heap),
-    owner_(owner) {
+void FreeListNode::set_next(FreeListNode* next) {
+  ASSERT(IsFreeListNode(this));
+  // While we are booting the VM, the free space map will actually be null,
+  // so we have to make sure that we don't try to use it for anything at
+  // that stage.
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
+    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) =
+        reinterpret_cast<Address>(next);
+  } else {
+    Memory::Address_at(address() + kPointerSize) =
+        reinterpret_cast<Address>(next);
+  }
+}
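
The branches above encode two block layouts: a block big enough to be a
FreeSpace stores {map, size, next}, so the next pointer lives after the size
word, while a two-word filler has no size field and reuses the word right
after the map; one-word blocks have no room for a next pointer and are never
linked at all. A sketch with illustrative word offsets (not V8's exact
constants):

#include <cassert>
#include <cstdint>

struct FreeBlock {
  bool is_free_space;  // stands in for comparing against the free space map
  uintptr_t words[3];  // words[0] holds the size for FreeSpace blocks

  uintptr_t* next_slot() {
    return is_free_space ? &words[1]   // after the map and the size word
                         : &words[0];  // directly after the map
  }
};

int main() {
  FreeBlock big{true, {64, 0, 0}};    // FreeSpace of 64 bytes
  FreeBlock tiny{false, {0, 0, 0}};   // two-word filler
  *big.next_slot() = 0x1234;          // stored after the size field
  *tiny.next_slot() = 0x5678;         // stored where the size would be
  assert(big.words[1] == 0x1234 && tiny.words[0] == 0x5678);
  return 0;
}
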
+
+
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner), heap_(owner->heap()) {
   Reset();
 }
 
 
-void OldSpaceFreeList::Reset() {
+void FreeList::Reset() {
   available_ = 0;
-  for (int i = 0; i < kFreeListsLength; i++) {
-    free_[i].head_node_ = NULL;
-  }
-  needs_rebuild_ = false;
-  finger_ = kHead;
-  free_[kHead].next_size_ = kEnd;
+  small_list_ = NULL;
+  medium_list_ = NULL;
+  large_list_ = NULL;
+  huge_list_ = NULL;
 }
 
 
-void OldSpaceFreeList::RebuildSizeList() {
-  ASSERT(needs_rebuild_);
-  int cur = kHead;
-  for (int i = cur + 1; i < kFreeListsLength; i++) {
-    if (free_[i].head_node_ != NULL) {
-      free_[cur].next_size_ = i;
-      cur = i;
-    }
-  }
-  free_[cur].next_size_ = kEnd;
-  needs_rebuild_ = false;
-}
-
-
-int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
-#ifdef DEBUG
-  Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
-#endif
+int FreeList::Free(Address start, int size_in_bytes) {
+  if (size_in_bytes == 0) return 0;
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(heap_, size_in_bytes);
 
-  // We don't use the freelists in compacting mode.  This makes it more like a
-  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
-  // collector.
-  if (FLAG_always_compact) {
-    return size_in_bytes;
-  }
+  // Early return to drop too-small blocks on the floor.
+  if (size_in_bytes < kSmallListMin) return size_in_bytes;
 
-  // Early return to drop too-small blocks on the floor (one or two word
-  // blocks cannot hold a map pointer, a size field, and a pointer to the
-  // next block in the free list).
-  if (size_in_bytes < kMinBlockSize) {
-    return size_in_bytes;
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  if (size_in_bytes <= kSmallListMax) {
+    node->set_next(small_list_);
+    small_list_ = node;
+  } else if (size_in_bytes <= kMediumListMax) {
+    node->set_next(medium_list_);
+    medium_list_ = node;
+  } else if (size_in_bytes <= kLargeListMax) {
+    node->set_next(large_list_);
+    large_list_ = node;
+  } else {
+    node->set_next(huge_list_);
+    huge_list_ = node;
   }
-
-  // Insert other blocks at the head of an exact free list.
-  int index = size_in_bytes >> kPointerSizeLog2;
-  node->set_next(heap_, free_[index].head_node_);
-  free_[index].head_node_ = node->address();
   available_ += size_in_bytes;
-  needs_rebuild_ = true;
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
   return 0;
 }
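
Free() above is constant-time: the freed block is pushed onto the head of
one of four lists chosen purely by its size class. A standalone sketch with
made-up thresholds (the real kSmallListMax/kMediumListMax/kLargeListMax
values differ):

#include <cassert>
#include <cstddef>

struct FreeNode { FreeNode* next; size_t size; };

struct SegregatedFreeList {
  static const size_t kSmallMax = 0x100;   // illustrative thresholds
  static const size_t kMediumMax = 0x1000;
  static const size_t kLargeMax = 0x10000;

  FreeNode* small_ = nullptr;
  FreeNode* medium_ = nullptr;
  FreeNode* large_ = nullptr;
  FreeNode* huge_ = nullptr;   // everything above kLargeMax
  size_t available_ = 0;

  void Free(FreeNode* node, size_t size) {
    node->size = size;
    FreeNode** list = size <= kSmallMax  ? &small_
                    : size <= kMediumMax ? &medium_
                    : size <= kLargeMax  ? &large_
                                         : &huge_;
    node->next = *list;  // push at the head: O(1)
    *list = node;
    available_ += size;
  }
};

int main() {
  SegregatedFreeList fl;
  FreeNode a, b;
  fl.Free(&a, 0x80);     // lands on small_
  fl.Free(&b, 0x20000);  // lands on huge_
  assert(fl.small_ == &a && fl.huge_ == &b && fl.available_ == 0x20080);
  return 0;
}
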
 
 
-MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
-  ASSERT(0 < size_in_bytes);
-  ASSERT(size_in_bytes <= kMaxBlockSize);
-  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+  FreeListNode* node = *list;
 
-  if (needs_rebuild_) RebuildSizeList();
-  int index = size_in_bytes >> kPointerSizeLog2;
-  // Check for a perfect fit.
-  if (free_[index].head_node_ != NULL) {
-    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
-    // If this was the last block of its size, remove the size.
-    if ((free_[index].head_node_ = node->next(heap_)) == NULL)
-      RemoveSize(index);
-    available_ -= size_in_bytes;
-    *wasted_bytes = 0;
-    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-    return node;
+  if (node == NULL) return NULL;
+
+  while (node != NULL &&
+         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+    available_ -= node->Size();
+    node = node->next();
   }
-  // Search the size list for the best fit.
-  int prev = finger_ < index ? finger_ : kHead;
-  int cur = FindSize(index, &prev);
-  ASSERT(index < cur);
-  if (cur == kEnd) {
-    // No large enough size in list.
-    *wasted_bytes = 0;
-    return Failure::RetryAfterGC(owner_);
-  }
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-  int rem = cur - index;
-  int rem_bytes = rem << kPointerSizeLog2;
-  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
-  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
-  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
-                                                     size_in_bytes);
-  // Distinguish the cases prev < rem < cur and rem <= prev < cur
-  // to avoid many redundant tests and calls to Insert/RemoveSize.
-  if (prev < rem) {
-    // Simple case: insert rem between prev and cur.
-    finger_ = prev;
-    free_[prev].next_size_ = rem;
-    // If this was the last block of size cur, remove the size.
-    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
-      free_[rem].next_size_ = free_[cur].next_size_;
-    } else {
-      free_[rem].next_size_ = cur;
-    }
-    // Add the remainder block.
-    rem_node->set_size(heap_, rem_bytes);
-    rem_node->set_next(heap_, free_[rem].head_node_);
-    free_[rem].head_node_ = rem_node->address();
+
+  if (node != NULL) {
+    *node_size = node->Size();
+    *list = node->next();
   } else {
-    // If this was the last block of size cur, remove the size.
-    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
-      finger_ = prev;
-      free_[prev].next_size_ = free_[cur].next_size_;
-    }
-    if (rem_bytes < kMinBlockSize) {
-      // Too-small remainder is wasted.
-      rem_node->set_size(heap_, rem_bytes);
-      available_ -= size_in_bytes + rem_bytes;
-      *wasted_bytes = rem_bytes;
-      return cur_node;
-    }
-    // Add the remainder block and, if needed, insert its size.
-    rem_node->set_size(heap_, rem_bytes);
-    rem_node->set_next(heap_, free_[rem].head_node_);
-    free_[rem].head_node_ = rem_node->address();
-    if (rem_node->next(heap_) == NULL) InsertSize(rem);
-  }
-  available_ -= size_in_bytes;
-  *wasted_bytes = 0;
-  return cur_node;
-}
-
-
-void OldSpaceFreeList::MarkNodes() {
-  for (int i = 0; i < kFreeListsLength; i++) {
-    Address cur_addr = free_[i].head_node_;
-    while (cur_addr != NULL) {
-      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-      cur_addr = cur_node->next(heap_);
-      cur_node->SetMark();
-    }
-  }
-}
-
-
-#ifdef DEBUG
-bool OldSpaceFreeList::Contains(FreeListNode* node) {
-  for (int i = 0; i < kFreeListsLength; i++) {
-    Address cur_addr = free_[i].head_node_;
-    while (cur_addr != NULL) {
-      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-      if (cur_node == node) return true;
-      cur_addr = cur_node->next(heap_);
-    }
-  }
-  return false;
-}
-#endif
-
-
-FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
-                                     AllocationSpace owner,
-                                     int object_size)
-    : heap_(heap), owner_(owner), object_size_(object_size) {
-  Reset();
-}
-
-
-void FixedSizeFreeList::Reset() {
-  available_ = 0;
-  head_ = tail_ = NULL;
-}
-
-
-void FixedSizeFreeList::Free(Address start) {
-#ifdef DEBUG
-  Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
-#endif
-  // We only use the freelists with mark-sweep.
-  ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, object_size_);
-  node->set_next(heap_, NULL);
-  if (head_ == NULL) {
-    tail_ = head_ = node->address();
-  } else {
-    FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
-    tail_ = node->address();
-  }
-  available_ += object_size_;
-}
-
-
-MaybeObject* FixedSizeFreeList::Allocate() {
-  if (head_ == NULL) {
-    return Failure::RetryAfterGC(owner_);
+    *list = NULL;
   }
 
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-  FreeListNode* node = FreeListNode::FromAddress(head_);
-  head_ = node->next(heap_);
-  available_ -= object_size_;
   return node;
 }
 
 
-void FixedSizeFreeList::MarkNodes() {
-  Address cur_addr = head_;
-  while (cur_addr != NULL && cur_addr != tail_) {
-    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-    cur_addr = cur_node->next(heap_);
-    cur_node->SetMark();
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
+
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = PickNodeFromList(&small_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = PickNodeFromList(&medium_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = PickNodeFromList(&large_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  for (FreeListNode** cur = &huge_list_;
+       *cur != NULL;
+       cur = (*cur)->next_address()) {
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) break;
+
+    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found.  Unlink it from the list.
+      node = *cur;
+      *node_size = size;
+      *cur = node->next();
+      break;
+    }
+  }
+
+  return node;
+}
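
The sized lists above can be popped blindly because a request is only routed
to a bucket whose smallest possible node already covers it; the huge list has
no upper bound on node size, so it alone needs a first-fit scan. A sketch of
that scan, minus the evacuation-candidate filtering, using a pointer to the
current link field so the matching node can be unlinked in place:

#include <cassert>
#include <cstddef>

struct HugeNode { HugeNode* next; size_t size; };

HugeNode* FirstFit(HugeNode** list, size_t request) {
  for (HugeNode** cur = list; *cur != nullptr; cur = &(*cur)->next) {
    if ((*cur)->size >= request) {
      HugeNode* node = *cur;
      *cur = node->next;  // splice the node out of the list
      return node;
    }
  }
  return nullptr;  // no node is large enough
}

int main() {
  HugeNode a{nullptr, 0x20000};
  HugeNode b{&a, 0x18000};
  HugeNode* head = &b;  // list: b (0x18000) -> a (0x20000)
  HugeNode* n = FirstFit(&head, 0x1c000);
  assert(n == &a && head == &b && b.next == nullptr);
  return 0;
}
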
+
+
+// Allocation on the old space free list.  If it succeeds, a new linear
+// allocation area is set up with fresh top and limit values.  If the
+// allocation fails, NULL is returned and the caller can perform a GC or
+// allocate a new page before retrying.
+HeapObject* FreeList::Allocate(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  ASSERT(size_in_bytes <= kMaxBlockSize);
+  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+  // Don't free list allocate if there is linear space available.
+  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
+
+  int new_node_size = 0;
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == NULL) return NULL;
+
+  available_ -= new_node_size;
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+
+  int bytes_left = new_node_size - size_in_bytes;
+  ASSERT(bytes_left >= 0);
+
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  owner_->Free(owner_->top(), old_linear_size);
+
+#ifdef DEBUG
+  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+    reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+  }
+#endif
+
+  owner_->heap()->incremental_marking()->OldSpaceStep(
+      size_in_bytes - old_linear_size);
+
+  // The old-space-step might have finished sweeping and restarted marking.
+  // Verify that it did not turn the page of the new node into an evacuation
+  // candidate.
+  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+
+  // Memory in the linear allocation area is counted as allocated.  We may
+  // free a little of this again immediately; see below.
+  owner_->Allocate(new_node_size);
+
+  if (bytes_left > kThreshold &&
+      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+      FLAG_incremental_marking_steps) {
+    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    // We don't want to give too large linear areas to the allocator while
+    // incremental marking is going on, because we won't check again whether
+    // we want to do another increment until the linear area is used up.
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
+    owner_->SetTop(new_node->address() + size_in_bytes,
+                   new_node->address() + size_in_bytes + linear_size);
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTop(new_node->address() + size_in_bytes,
+                   new_node->address() + new_node_size);
+  } else {
+    // TODO(gc) Try not freeing the linear allocation region when
+    // bytes_left is zero.
+    owner_->SetTop(NULL, NULL);
+  }
+
+  return new_node;
+}
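
What Allocate() does with the node it finds: the first size_in_bytes bytes
become the object and, in the normal case, the remainder becomes the new
linear allocation area, so subsequent allocations are plain pointer bumps
until the limit is reached. A standalone sketch of that handoff (addresses
modeled as integers; no incremental-marking throttle):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct LinearArea { uintptr_t top = 0, limit = 0; };

// Carve `request` bytes off the front of a free node and hand the
// remainder to the bump-pointer allocator.
uintptr_t CarveAndSetTop(LinearArea* area, uintptr_t node,
                         size_t node_size, size_t request) {
  assert(request <= node_size);
  area->top = node + request;      // first byte after the new object
  area->limit = node + node_size;  // the rest of the node is linear space
  return node;                     // address of the allocated object
}

// Fast path: bump the top pointer; fail once the area is exhausted.
uintptr_t BumpAllocate(LinearArea* area, size_t size) {
  if (area->limit - area->top < size) return 0;
  uintptr_t result = area->top;
  area->top += size;
  return result;
}

int main() {
  LinearArea area;
  uintptr_t obj = CarveAndSetTop(&area, 0x1000, 256, 64);
  assert(obj == 0x1000 && area.top == 0x1040 && area.limit == 0x1100);
  assert(BumpAllocate(&area, 128) == 0x1040);  // bump succeeds
  assert(BumpAllocate(&area, 128) == 0);       // only 64 bytes left
  return 0;
}
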
+
+
+static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
+  intptr_t sum = 0;
+  while (n != NULL) {
+    if (Page::FromAddress(n->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+      sum += free_space->Size();
+    }
+    n = n->next();
+  }
+  return sum;
+}
+
+
+void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
+  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
+  if (sizes->huge_size_ < p->area_size()) {
+    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
+    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
+    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+  } else {
+    sizes->small_size_ = 0;
+    sizes->medium_size_ = 0;
+    sizes->large_size_ = 0;
   }
 }
 
 
+static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
+  intptr_t sum = 0;
+  while (*n != NULL) {
+    if (Page::FromAddress((*n)->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      sum += free_space->Size();
+      *n = (*n)->next();
+    } else {
+      n = (*n)->next_address();
+    }
+  }
+  return sum;
+}
+
+
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
+
+  if (sum < p->area_size()) {
+    sum += EvictFreeListItemsInList(&small_list_, p) +
+        EvictFreeListItemsInList(&medium_list_, p) +
+        EvictFreeListItemsInList(&large_list_, p);
+  }
+
+  available_ -= static_cast<int>(sum);
+
+  return sum;
+}
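
EvictFreeListItemsInList above removes every node on a given page in a
single pass. A standalone sketch of the same pattern: iterating with a
pointer to the current link field means no separate prev pointer is needed
to unlink a node:

#include <cassert>
#include <cstddef>

struct FreeNode { FreeNode* next; int page_id; size_t size; };

// Splice out every node on `page_id`, returning the evicted byte count.
size_t EvictPage(FreeNode** n, int page_id) {
  size_t sum = 0;
  while (*n != nullptr) {
    if ((*n)->page_id == page_id) {
      sum += (*n)->size;
      *n = (*n)->next;   // unlink the node in place...
    } else {
      n = &(*n)->next;   // ...or step to the next link field
    }
  }
  return sum;
}

int main() {
  FreeNode c{nullptr, 1, 8};
  FreeNode b{&c, 2, 16};
  FreeNode a{&b, 1, 32};
  FreeNode* head = &a;  // list: a (page 1) -> b (page 2) -> c (page 1)
  assert(EvictPage(&head, 1) == 40);
  assert(head == &b && b.next == nullptr);  // only the page-2 node remains
  return 0;
}
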
+
+
+#ifdef DEBUG
+intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+  intptr_t sum = 0;
+  while (cur != NULL) {
+    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+    sum += cur_as_free_space->Size();
+    cur = cur->next();
+  }
+  return sum;
+}
+
+
+static const int kVeryLongFreeList = 500;
+
+
+int FreeList::FreeListLength(FreeListNode* cur) {
+  int length = 0;
+  while (cur != NULL) {
+    length++;
+    cur = cur->next();
+    if (length == kVeryLongFreeList) return length;
+  }
+  return length;
+}
+
+
+bool FreeList::IsVeryLong() {
+  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
+  return false;
+}
+
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+intptr_t FreeList::SumFreeLists() {
+  intptr_t sum = SumFreeList(small_list_);
+  sum += SumFreeList(medium_list_);
+  sum += SumFreeList(large_list_);
+  sum += SumFreeList(huge_list_);
+  return sum;
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
-void OldSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
-
-  if (will_compact) {
-    // Reset relocation info.  During a compacting collection, everything in
-    // the space is considered 'available' and we will rediscover live data
-    // and waste during the collection.
-    MCResetRelocationInfo();
-    ASSERT(Available() == Capacity());
-  } else {
-    // During a non-compacting collection, everything below the linear
-    // allocation pointer is considered allocated (everything above is
-    // available) and we will rediscover available and wasted bytes during
-    // the collection.
-    accounting_stats_.AllocateBytes(free_list_.available());
-    accounting_stats_.FillWastedBytes(Waste());
+bool NewSpace::ReserveSpace(int bytes) {
+  // We can't reliably unpack a partial snapshot that needs more new space
+  // than the minimum NewSpace size.  The limit can be set lower than
+  // the end of new space either because there is more space on the next page
+  // or because we have lowered the limit in order to get periodic incremental
+  // marking.  The most reliable way to ensure that there is linear space is
+  // to do the allocation, then rewind the limit.
+  ASSERT(bytes <= InitialCapacity());
+  MaybeObject* maybe = AllocateRaw(bytes);
+  Object* object = NULL;
+  if (!maybe->ToObject(&object)) return false;
+  HeapObject* allocation = HeapObject::cast(object);
+  Address top = allocation_info_.top;
+  if ((top - bytes) == allocation->address()) {
+    allocation_info_.top = allocation->address();
+    return true;
   }
+  // There may be a borderline case here where the allocation succeeded, but
+  // the limit and top have moved on to a new page.  In that case we try again.
+  return ReserveSpace(bytes);
+}
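
The allocate-then-rewind trick above deserves spelling out: because the
limit can sit below the physical end of the page (to force periodic
incremental-marking checks), comparing limit - top proves nothing, so the
code allocates the bytes for real, checks that the result is contiguous
with the old top, and rewinds. A sketch under a simplified bump-pointer
model; AllocateRaw here is a stand-in that never moves to a fresh page, so
the retry path is shown but never taken:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct LinearArea { uintptr_t top, limit; };

// Stand-in for the space's raw allocator.
uintptr_t AllocateRaw(LinearArea* area, size_t bytes) {
  if (area->limit - area->top < bytes) return 0;
  uintptr_t result = area->top;
  area->top += bytes;
  return result;
}

// Prove `bytes` of contiguous space exists by allocating it, then give
// it straight back by rewinding the top pointer.
bool ReserveLinear(LinearArea* area, size_t bytes) {
  uintptr_t obj = AllocateRaw(area, bytes);
  if (obj == 0) return false;      // caller must GC and retry
  if (area->top - bytes == obj) {  // contiguous with the old top
    area->top = obj;               // rewind; the bytes stay reserved
    return true;
  }
  // Allocation succeeded but top moved on to a new page; try once more.
  return ReserveLinear(area, bytes);
}

int main() {
  LinearArea area = {0x1000, 0x2000};
  assert(ReserveLinear(&area, 0x800) && area.top == 0x1000);
  assert(!ReserveLinear(&area, 0x2000));  // cannot fit without a GC
  return 0;
}
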
+
+
+void PagedSpace::PrepareForMarkCompact() {
+  // We don't have a linear allocation area while sweeping.  It will be restored
+  // on the first allocation after the sweep.
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.
+  int old_linear_size = static_cast<int>(limit() - top());
+  Free(top(), old_linear_size);
+  SetTop(NULL, NULL);
+
+  // Stop lazy sweeping and clear marking bits for unswept pages.
+  if (first_unswept_page_ != NULL) {
+    Page* p = first_unswept_page_;
+    do {
+      // Do not use ShouldBeSweptLazily predicate here.
+      // New evacuation candidates were selected but they still have
+      // to be swept before collection starts.
+      if (!p->WasSwept()) {
+        Bitmap::Clear(p);
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+      }
+      p = p->next_page();
+    } while (p != anchor());
+  }
+  first_unswept_page_ = Page::FromAddress(NULL);
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
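
Freeing the linear allocation area here is what keeps the heap iterable:
scans walk from object to object using each header's size, so the unused
gap between top and limit must itself carry a valid size header. A toy
model of such a walk, with size-prefixed records standing in for heap
objects:

#include <cstdio>
#include <vector>

// A page is a sequence of size-prefixed records; a scan must be able to
// step over every byte, so the unused tail gets a "free space" record.
struct Record { int size; bool is_free; };

int main() {
  const int kPageArea = 128;
  std::vector<Record> page = { {16, false}, {32, false} };  // live objects
  int used = 16 + 32;
  page.push_back({kPageArea - used, true});  // filler over [top, limit)

  int covered = 0;
  for (const Record& r : page) {  // the walk now covers the whole page
    std::printf("%s record, %d bytes\n", r.is_free ? "free" : "live", r.size);
    covered += r.size;
  }
  std::printf("covered %d of %d bytes\n", covered, kPageArea);
  return 0;
}
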
 
 
-void OldSpace::MCCommitRelocationInfo() {
-  // Update fast allocation info.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = mc_forwarding_info_.limit;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
+bool PagedSpace::ReserveSpace(int size_in_bytes) {
+  ASSERT(size_in_bytes <= AreaSize());
+  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
+  Address current_top = allocation_info_.top;
+  Address new_top = current_top + size_in_bytes;
+  if (new_top <= allocation_info_.limit) return true;
 
-  // The space is compacted and we haven't yet built free lists or
-  // wasted any space.
-  ASSERT(Waste() == 0);
-  ASSERT(AvailableFree() == 0);
+  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
+  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
+  if (new_area == NULL) return false;
 
-  // Build the free list for the space.
-  int computed_size = 0;
-  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-  while (it.has_next()) {
-    Page* p = it.next();
-    // Space below the relocation pointer is allocated.
-    computed_size +=
-        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
-    if (it.has_next()) {
-      // Free the space at the top of the page.
-      int extra_size =
-          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
-      if (extra_size > 0) {
-        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
-                                           extra_size);
-        // The bytes we have just "freed" to add to the free list were
-        // already accounted as available.
-        accounting_stats_.WasteBytes(wasted_bytes);
-      }
-    }
-  }
+  int old_linear_size = static_cast<int>(limit() - top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  Free(top(), old_linear_size);
 
-  // Make sure the computed size - based on the used portion of the pages in
-  // use - matches the size obtained while computing forwarding addresses.
-  ASSERT(computed_size == Size());
-}
-
-
-bool NewSpace::ReserveSpace(int bytes) {
-  // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.
-  ASSERT(bytes <= InitialCapacity());
-  Address limit = allocation_info_.limit;
-  Address top = allocation_info_.top;
-  return limit - top >= bytes;
-}
-
-
-void PagedSpace::FreePages(Page* prev, Page* last) {
-  if (last == AllocationTopPage()) {
-    // Pages are already at the end of used pages.
-    return;
-  }
-
-  Page* first = NULL;
-
-  // Remove pages from the list.
-  if (prev == NULL) {
-    first = first_page_;
-    first_page_ = last->next_page();
-  } else {
-    first = prev->next_page();
-    heap()->isolate()->memory_allocator()->SetNextPage(
-        prev, last->next_page());
-  }
-
-  // Attach it after the last page.
-  heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
-  last_page_ = last;
-  heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
-
-  // Clean them up.
-  do {
-    first->InvalidateWatermark(true);
-    first->SetAllocationWatermark(first->ObjectAreaStart());
-    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
-    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    first = first->next_page();
-  } while (first != NULL);
-
-  // Order of pages in this space might no longer be consistent with
-  // order of pages in chunks.
-  page_list_is_chunk_ordered_ = false;
-}
-
-
-void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
-  const bool add_to_freelist = true;
-
-  // Mark used and unused pages to properly fill unused pages
-  // after reordering.
-  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
-  Page* last_in_use = AllocationTopPage();
-  bool in_use = true;
-
-  while (all_pages_iterator.has_next()) {
-    Page* p = all_pages_iterator.next();
-    p->SetWasInUseBeforeMC(in_use);
-    if (p == last_in_use) {
-      // We passed a page containing allocation top. All consequent
-      // pages are not used.
-      in_use = false;
-    }
-  }
-
-  if (page_list_is_chunk_ordered_) return;
-
-  Page* new_last_in_use = Page::FromAddress(NULL);
-  heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
-      this, &first_page_, &last_page_, &new_last_in_use);
-  ASSERT(new_last_in_use->is_valid());
-
-  if (new_last_in_use != last_in_use) {
-    // Current allocation top points to a page which is now in the middle
-    // of page list. We should move allocation top forward to the new last
-    // used page so various object iterators will continue to work properly.
-    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
-                                         last_in_use->AllocationTop());
-
-    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-    if (size_in_bytes > 0) {
-      Address start = last_in_use->AllocationTop();
-      if (deallocate_blocks) {
-        accounting_stats_.AllocateBytes(size_in_bytes);
-        DeallocateBlock(start, size_in_bytes, add_to_freelist);
-      } else {
-        heap()->CreateFillerObjectAt(start, size_in_bytes);
-      }
-    }
-
-    // New last in use page was in the middle of the list before
-    // sorting so it full.
-    SetTop(new_last_in_use->AllocationTop());
-
-    ASSERT(AllocationTopPage() == new_last_in_use);
-    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
-  }
-
-  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
-  while (pages_in_use_iterator.has_next()) {
-    Page* p = pages_in_use_iterator.next();
-    if (!p->WasInUseBeforeMC()) {
-      // Empty page is in the middle of a sequence of used pages.
-      // Allocate it as a whole and deallocate immediately.
-      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
-                                           p->ObjectAreaStart());
-
-      p->SetAllocationWatermark(p->ObjectAreaStart());
-      Address start = p->ObjectAreaStart();
-      if (deallocate_blocks) {
-        accounting_stats_.AllocateBytes(size_in_bytes);
-        DeallocateBlock(start, size_in_bytes, add_to_freelist);
-      } else {
-        heap()->CreateFillerObjectAt(start, size_in_bytes);
-      }
-    }
-  }
-
-  page_list_is_chunk_ordered_ = true;
-}
-
-
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
-  if (will_compact) {
-    RelinkPageListInChunkOrder(false);
-  }
-}
-
-
-bool PagedSpace::ReserveSpace(int bytes) {
-  Address limit = allocation_info_.limit;
-  Address top = allocation_info_.top;
-  if (limit - top >= bytes) return true;
-
-  // There wasn't enough space in the current page.  Lets put the rest
-  // of the page on the free list and start a fresh page.
-  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
-
-  Page* reserved_page = TopPageOf(allocation_info_);
-  int bytes_left_to_reserve = bytes;
-  while (bytes_left_to_reserve > 0) {
-    if (!reserved_page->next_page()->is_valid()) {
-      if (heap()->OldGenerationAllocationLimitReached()) return false;
-      Expand(reserved_page);
-    }
-    bytes_left_to_reserve -= Page::kPageSize;
-    reserved_page = reserved_page->next_page();
-    if (!reserved_page->is_valid()) return false;
-  }
-  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
-  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
-  SetAllocationInfo(&allocation_info_,
-                    TopPageOf(allocation_info_)->next_page());
+  SetTop(new_area->address(), new_area->address() + size_in_bytes);
+  Allocate(size_in_bytes);
   return true;
 }
 
@@ -2206,45 +2201,55 @@
 }
 
 
-// Slow case for normal allocation.  Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
-  // Linear allocation in this space has failed.  If there is another page
-  // in the space, move to that page and allocate there.  This allocation
-  // should succeed (size_in_bytes should not be greater than a page's
-  // object area size).
-  Page* current_page = TopPageOf(allocation_info_);
-  if (current_page->next_page()->is_valid()) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
+bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
+  if (IsSweepingComplete()) return true;
 
-  // There is no next page in this space.  Try free list allocation unless that
-  // is currently forbidden.
-  if (!heap()->linear_allocation()) {
-    int wasted_bytes;
-    Object* result;
-    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
-    accounting_stats_.WasteBytes(wasted_bytes);
-    if (maybe->ToObject(&result)) {
-      accounting_stats_.AllocateBytes(size_in_bytes);
-
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
-
-      if (obj->address() >= p->AllocationWatermark()) {
-        // There should be no hole between the allocation watermark
-        // and allocated object address.
-        // Memory above the allocation watermark was not swept and
-        // might contain garbage pointers to new space.
-        ASSERT(obj->address() == p->AllocationWatermark());
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+  intptr_t freed_bytes = 0;
+  Page* p = first_unswept_page_;
+  do {
+    Page* next_page = p->next_page();
+    if (ShouldBeSweptLazily(p)) {
+      if (FLAG_gc_verbose) {
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
+               reinterpret_cast<intptr_t>(p));
       }
-
-      return obj;
+      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
     }
+    p = next_page;
+  } while (p != anchor() && freed_bytes < bytes_to_sweep);
+
+  if (p == anchor()) {
+    first_unswept_page_ = Page::FromAddress(NULL);
+  } else {
+    first_unswept_page_ = p;
   }
 
+  heap()->LowerOldGenLimits(freed_bytes);
+
+  heap()->FreeQueuedChunks();
+
+  return IsSweepingComplete();
+}
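
AdvanceSweeper spreads the sweeping cost across allocations: each call
sweeps pages only until it has reclaimed roughly enough bytes for the
pending request, then remembers where to resume. A toy version of that
loop, using a flat page array instead of V8's page list:

#include <cassert>
#include <cstddef>
#include <vector>

struct SweepablePage { bool swept = false; size_t dead_bytes = 0; };

// Sweep from *cursor until at least bytes_to_sweep bytes are freed or the
// pages run out; return the bytes actually reclaimed.
size_t AdvanceSweeper(std::vector<SweepablePage>* pages, size_t* cursor,
                      size_t bytes_to_sweep) {
  size_t freed = 0;
  while (*cursor < pages->size() && freed < bytes_to_sweep) {
    SweepablePage& p = (*pages)[*cursor];
    if (!p.swept) {  // reclaim this page's dead bytes
      freed += p.dead_bytes;
      p.dead_bytes = 0;
      p.swept = true;
    }
    ++*cursor;       // resume after this page next time
  }
  return freed;
}

int main() {
  std::vector<SweepablePage> pages(4);
  for (SweepablePage& p : pages) p.dead_bytes = 100;
  size_t cursor = 0;
  assert(AdvanceSweeper(&pages, &cursor, 150) == 200 && cursor == 2);
  assert(AdvanceSweeper(&pages, &cursor, 1000) == 200 && cursor == 4);
  return 0;
}
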
+
+
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+  if (allocation_info_.top >= allocation_info_.limit) return;
+
+  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+    // Create filler object to keep page iterable if it was iterable.
+    int remaining =
+        static_cast<int>(allocation_info_.limit - allocation_info_.top);
+    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+
+    allocation_info_.top = NULL;
+    allocation_info_.limit = NULL;
+  }
+}
+
+
+HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Allocation in this space has failed.
+
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
@@ -2253,10 +2258,26 @@
     return NULL;
   }
 
+  // If there are unswept pages, advance the lazy sweeper.
+  if (first_unswept_page_->is_valid()) {
+    AdvanceSweeper(size_in_bytes);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+
+    if (!IsSweepingComplete()) {
+      AdvanceSweeper(kMaxInt);
+
+      // Retry the free list allocation.
+      object = free_list_.Allocate(size_in_bytes);
+      if (object != NULL) return object;
+    }
+  }
+
   // Try to expand the space and allocate in the new next page.
-  ASSERT(!current_page->next_page()->is_valid());
-  if (Expand(current_page)) {
-    return AllocateInNextPage(current_page, size_in_bytes);
+  if (Expand()) {
+    return free_list_.Allocate(size_in_bytes);
   }
 
   // Finally, fail.
@@ -2264,53 +2285,6 @@
 }
 
 
-void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  int free_size =
-      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-  if (free_size > 0) {
-    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
-    accounting_stats_.WasteBytes(wasted_bytes);
-  }
-}
-
-
-void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  int free_size =
-      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-  // In the fixed space free list all the free list items have the right size.
-  // We use up the rest of the page while preserving this invariant.
-  while (free_size >= object_size_in_bytes_) {
-    free_list_.Free(allocation_info_.top);
-    allocation_info_.top += object_size_in_bytes_;
-    free_size -= object_size_in_bytes_;
-    accounting_stats_.WasteBytes(object_size_in_bytes_);
-  }
-}
-
-
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  Page* next_page = current_page->next_page();
-  next_page->ClearGCFields();
-  PutRestOfCurrentPageOnFreeList(current_page);
-  SetAllocationInfo(&allocation_info_, next_page);
-  return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void OldSpace::DeallocateBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist) {
-  Free(start, size_in_bytes, add_to_freelist);
-}
-
-
 #ifdef DEBUG
 void PagedSpace::ReportCodeStatistics() {
   Isolate* isolate = Isolate::Current();
@@ -2413,7 +2387,7 @@
 void PagedSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
@@ -2438,16 +2412,17 @@
 }
 
 
-void OldSpace::ReportStatistics() {
+void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", waste: %" V8_PTR_PREFIX "d"
              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
+  if (was_swept_conservatively_) return;
   ClearHistograms();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
@@ -2456,192 +2431,28 @@
 // -----------------------------------------------------------------------------
 // FixedSpace implementation
 
-void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact() {
   // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
+  PagedSpace::PrepareForMarkCompact();
 
-  if (will_compact) {
-    // Reset relocation info.
-    MCResetRelocationInfo();
-
-    // During a compacting collection, everything in the space is considered
-    // 'available' (set by the call to MCResetRelocationInfo) and we will
-    // rediscover live and wasted bytes during the collection.
-    ASSERT(Available() == Capacity());
-  } else {
-    // During a non-compacting collection, everything below the linear
-    // allocation pointer except wasted top-of-page blocks is considered
-    // allocated and we will rediscover available bytes during the
-    // collection.
-    accounting_stats_.AllocateBytes(free_list_.available());
-  }
+  // During a non-compacting collection, everything below the linear
+  // allocation pointer except wasted top-of-page blocks is considered
+  // allocated and we will rediscover available bytes during the
+  // collection.
+  accounting_stats_.AllocateBytes(free_list_.available());
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
-void FixedSpace::MCCommitRelocationInfo() {
-  // Update fast allocation info.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = mc_forwarding_info_.limit;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-
-  // The space is compacted and we haven't yet wasted any space.
-  ASSERT(Waste() == 0);
-
-  // Update allocation_top of each page in use and compute waste.
-  int computed_size = 0;
-  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address page_top = page->AllocationTop();
-    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
-    if (it.has_next()) {
-      accounting_stats_.WasteBytes(
-          static_cast<int>(page->ObjectAreaEnd() - page_top));
-      page->SetAllocationWatermark(page_top);
-    }
-  }
-
-  // Make sure the computed size - based on the used portion of the
-  // pages in use - matches the size we adjust during allocation.
-  ASSERT(computed_size == Size());
-}
-
-
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
-  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  // Linear allocation in this space has failed.  If there is another page
-  // in the space, move to that page and allocate there.  This allocation
-  // should succeed.
-  Page* current_page = TopPageOf(allocation_info_);
-  if (current_page->next_page()->is_valid()) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
-
-  // There is no next page in this space.  Try free list allocation unless
-  // that is currently forbidden.  The fixed space free list implicitly assumes
-  // that all free blocks are of the fixed size.
-  if (!heap()->linear_allocation()) {
-    Object* result;
-    MaybeObject* maybe = free_list_.Allocate();
-    if (maybe->ToObject(&result)) {
-      accounting_stats_.AllocateBytes(size_in_bytes);
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
-
-      if (obj->address() >= p->AllocationWatermark()) {
-        // There should be no hole between the allocation watermark
-        // and allocated object address.
-        // Memory above the allocation watermark was not swept and
-        // might contain garbage pointers to new space.
-        ASSERT(obj->address() == p->AllocationWatermark());
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
-      }
-
-      return obj;
-    }
-  }
-
-  // Free list allocation failed and there is no next page.  Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
-  }
-
-  // Try to expand the space and allocate in the new next page.
-  ASSERT(!current_page->next_page()->is_valid());
-  if (Expand(current_page)) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
-
-  // Finally, fail.
-  return NULL;
-}
-
-
-// Move to the next page (there is assumed to be one) and allocate there.
-// The top of page block is always wasted, because it is too small to hold a
-// map.
-HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
-                                           int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
-  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  Page* next_page = current_page->next_page();
-  next_page->ClearGCFields();
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  accounting_stats_.WasteBytes(page_extra_);
-  SetAllocationInfo(&allocation_info_, next_page);
-  return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void FixedSpace::DeallocateBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist) {
-  // Free-list elements in fixed space are assumed to have a fixed size.
-  // We break the free block into chunks and add them to the free list
-  // individually.
-  int size = object_size_in_bytes();
-  ASSERT(size_in_bytes % size == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += size) {
-    Free(a, add_to_freelist);
-  }
-}
-
-
-#ifdef DEBUG
-void FixedSpace::ReportStatistics() {
-  int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", waste: %" V8_PTR_PREFIX "d"
-             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-         Capacity(), Waste(), Available(), pct);
-
-  ClearHistograms();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
-    CollectHistogramInfo(obj);
-  ReportHistogram(false);
-}
-#endif
-
-
 // -----------------------------------------------------------------------------
 // MapSpace implementation
 
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  FixedSpace::PrepareForMarkCompact(will_compact);
-
-  if (will_compact) {
-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-  }
-}
-
-
 #ifdef DEBUG
 void MapSpace::VerifyObject(HeapObject* object) {
   // The object should be a map or a free-list node.
-  ASSERT(object->IsMap() || object->IsByteArray());
+  ASSERT(object->IsMap() || object->IsFreeSpace());
 }
 #endif
 
@@ -2662,107 +2473,43 @@
 // LargeObjectIterator
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-  current_ = space->first_chunk_;
+  current_ = space->first_page_;
   size_func_ = NULL;
 }
 
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                          HeapObjectCallback size_func) {
-  current_ = space->first_chunk_;
+  current_ = space->first_page_;
   size_func_ = size_func;
 }
 
 
-HeapObject* LargeObjectIterator::next() {
+HeapObject* LargeObjectIterator::Next() {
   if (current_ == NULL) return NULL;
 
   HeapObject* object = current_->GetObject();
-  current_ = current_->next();
+  current_ = current_->next_page();
   return object;
 }
 
 
 // -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                        Executability executable) {
-  size_t requested = ChunkSizeFor(size_in_bytes);
-  size_t size;
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  Isolate* isolate = Isolate::Current();
-  void* mem = isolate->memory_allocator()->AllocateRawMemory(
-      requested + guard_size, &size, executable);
-  if (mem == NULL) return NULL;
-
-  // The start of the chunk may be overlayed with a page so we have to
-  // make sure that the page flags fit in the size field.
-  ASSERT((size & Page::kPageFlagMask) == 0);
-
-  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
-  if (size < requested + guard_size) {
-    isolate->memory_allocator()->FreeRawMemory(
-        mem, size, executable);
-    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
-    return NULL;
-  }
-
-  if (guard_size != 0) {
-    OS::Guard(mem, guard_size);
-    size -= guard_size;
-    mem = static_cast<Address>(mem) + guard_size;
-  }
-
-  ObjectSpace space = (executable == EXECUTABLE)
-      ? kObjectSpaceCodeSpace
-      : kObjectSpaceLoSpace;
-  isolate->memory_allocator()->PerformAllocationCallback(
-      space, kAllocationActionAllocate, size);
-
-  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
-  chunk->size_ = size;
-  chunk->GetPage()->heap_ = isolate->heap();
-  return chunk;
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  ObjectSpace space =
-      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
-  // Do not access instance fields after FreeRawMemory!
-  Address my_address = address();
-  size_t my_size = size();
-  Isolate* isolate = GetPage()->heap_->isolate();
-  MemoryAllocator* a = isolate->memory_allocator();
-  a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
-  a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
-  LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
-}
-
-
-int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
-  int os_alignment = static_cast<int>(OS::AllocateAlignment());
-  if (os_alignment < Page::kPageSize) {
-    size_in_bytes += (Page::kPageSize - os_alignment);
-  }
-  return size_in_bytes + Page::kObjectStartOffset;
-}
-
-// -----------------------------------------------------------------------------
 // LargeObjectSpace
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap,
+                                   intptr_t max_capacity,
+                                   AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      first_chunk_(NULL),
+      max_capacity_(max_capacity),
+      first_page_(NULL),
       size_(0),
       page_count_(0),
       objects_size_(0) {}
 
 
 bool LargeObjectSpace::Setup() {
-  first_chunk_ = NULL;
+  first_page_ = NULL;
   size_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
@@ -2771,20 +2518,22 @@
 
 
 void LargeObjectSpace::TearDown() {
-  while (first_chunk_ != NULL) {
-    LargeObjectChunk* chunk = first_chunk_;
-    first_chunk_ = first_chunk_->next();
-    chunk->Free(chunk->GetPage()->PageExecutability());
+  while (first_page_ != NULL) {
+    LargePage* page = first_page_;
+    first_page_ = first_page_->next_page();
+    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
+
+    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
+    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+        space, kAllocationActionFree, page->size());
+    heap()->isolate()->memory_allocator()->Free(page);
   }
   Setup();
 }
 
 
-MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
-                                                   int object_size,
-                                                   Executability executable) {
-  ASSERT(0 < object_size && object_size <= requested_size);
-
+MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
+                                           Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
@@ -2792,75 +2541,55 @@
     return Failure::RetryAfterGC(identity());
   }
 
-  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
-  if (chunk == NULL) {
+  if (Size() + object_size > max_capacity_) {
     return Failure::RetryAfterGC(identity());
   }
 
-  size_ += static_cast<int>(chunk->size());
-  objects_size_ += requested_size;
+  LargePage* page = heap()->isolate()->memory_allocator()->
+      AllocateLargePage(object_size, executable, this);
+  if (page == NULL) return Failure::RetryAfterGC(identity());
+  ASSERT(page->area_size() >= object_size);
+
+  size_ += static_cast<int>(page->size());
+  objects_size_ += object_size;
   page_count_++;
-  chunk->set_next(first_chunk_);
-  first_chunk_ = chunk;
+  page->set_next_page(first_page_);
+  first_page_ = page;
 
-  // Initialize page header.
-  Page* page = chunk->GetPage();
-  Address object_address = page->ObjectAreaStart();
+  HeapObject* object = page->GetObject();
 
-  // Clear the low order bit of the second word in the page to flag it as a
-  // large object page.  If the chunk_size happened to be written there, its
-  // low order bit should already be clear.
-  page->SetIsLargeObjectPage(true);
-  page->SetPageExecutability(executable);
-  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-  return HeapObject::FromAddress(object_address);
-}
+#ifdef DEBUG
+  // Make the object consistent so the heap can be verified in OldSpaceStep.
+  reinterpret_cast<Object**>(object->address())[0] =
+      heap()->fixed_array_map();
+  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+#endif
 
-
-MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             NOT_EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             NOT_EXECUTABLE);
+  heap()->incremental_marking()->OldSpaceStep(object_size);
+  return object;
 }
 
 
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
-  for (LargeObjectChunk* chunk = first_chunk_;
-       chunk != NULL;
-       chunk = chunk->next()) {
-    Address chunk_address = chunk->address();
-    if (chunk_address <= a && a < chunk_address + chunk->size()) {
-      return chunk->GetObject();
+  for (LargePage* page = first_page_;
+       page != NULL;
+       page = page->next_page()) {
+    Address page_address = page->address();
+    if (page_address <= a && a < page_address + page->size()) {
+      return page->GetObject();
     }
   }
   return Failure::Exception();
 }
 
 
-LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
   // TODO(853): Change this implementation to only find executable
   // chunks and use some kind of hash-based approach to speed it up.
-  for (LargeObjectChunk* chunk = first_chunk_;
+  for (LargePage* chunk = first_page_;
        chunk != NULL;
-       chunk = chunk->next()) {
+       chunk = chunk->next_page()) {
     Address chunk_address = chunk->address();
     if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
       return chunk;
@@ -2870,112 +2599,57 @@
 }
 
 
-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
-    if (object->IsFixedArray()) {
-      Page* page = Page::FromAddress(object->address());
-      uint32_t marks = page->GetRegionMarks();
-      uint32_t newmarks = Page::kAllRegionsCleanMarks;
-
-      if (marks != Page::kAllRegionsCleanMarks) {
-        // For a large page a single dirty mark corresponds to several
-        // regions (modulo 32). So we treat a large page as a sequence of
-        // normal pages of size Page::kPageSize having same dirty marks
-        // and subsequently iterate dirty regions on each of these pages.
-        Address start = object->address();
-        Address end = page->ObjectAreaEnd();
-        Address object_end = start + object->Size();
-
-        // Iterate regions of the first normal page covering object.
-        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
-        newmarks |=
-            heap()->IterateDirtyRegions(marks >> first_region_number,
-                                        start,
-                                        end,
-                                        &Heap::IteratePointersInDirtyRegion,
-                                        copy_object) << first_region_number;
-
-        start = end;
-        end = start + Page::kPageSize;
-        while (end <= object_end) {
-          // Iterate next 32 regions.
-          newmarks |=
-              heap()->IterateDirtyRegions(marks,
-                                          start,
-                                          end,
-                                          &Heap::IteratePointersInDirtyRegion,
-                                          copy_object);
-          start = end;
-          end = start + Page::kPageSize;
-        }
-
-        if (start != object_end) {
-          // Iterate the last piece of an object which is less than
-          // Page::kPageSize.
-          newmarks |=
-              heap()->IterateDirtyRegions(marks,
-                                          start,
-                                          object_end,
-                                          &Heap::IteratePointersInDirtyRegion,
-                                          copy_object);
-        }
-
-        page->SetRegionMarks(newmarks);
-      }
-    }
-  }
-}
-
-
 void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargeObjectChunk* previous = NULL;
-  LargeObjectChunk* current = first_chunk_;
+  LargePage* previous = NULL;
+  LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    if (object->IsMarked()) {
-      object->ClearMark();
-      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
+    // Can this large page contain pointers to non-trivial objects?  No other
+    // pointer object is this big.
+    bool is_pointer_object = object->IsFixedArray();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
       previous = current;
-      current = current->next();
+      current = current->next_page();
     } else {
+      LargePage* page = current;
       // Cut the chunk out from the chunk list.
-      LargeObjectChunk* current_chunk = current;
-      current = current->next();
+      current = current->next_page();
       if (previous == NULL) {
-        first_chunk_ = current;
+        first_page_ = current;
       } else {
-        previous->set_next(current);
+        previous->set_next_page(current);
       }
 
       // Free the chunk.
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
-      LiveObjectList::ProcessNonLive(object);
-
-      size_ -= static_cast<int>(current_chunk->size());
+      size_ -= static_cast<int>(page->size());
       objects_size_ -= object->Size();
       page_count_--;
-      current_chunk->Free(current_chunk->GetPage()->PageExecutability());
+
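+      // (editor's note) Queueing defers the actual release to
+      // FreeQueuedChunks() below, presumably so that stale store buffer
+      // entries pointing into this page can be filtered out first.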
+      if (is_pointer_object) {
+        heap()->QueueMemoryChunkForFree(page);
+      } else {
+        heap()->isolate()->memory_allocator()->Free(page);
+      }
     }
   }
+  heap()->FreeQueuedChunks();
 }
 
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
-  if (heap()->new_space()->Contains(address)) {
-    return false;
-  }
-  Page* page = Page::FromAddress(address);
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
 
-  SLOW_ASSERT(!page->IsLargeObjectPage()
-              || !FindObject(address)->IsFailure());
+  bool owned = (chunk->owner() == this);
 
-  return page->IsLargeObjectPage();
+  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+
+  return owned;
 }
 
 
@@ -2983,14 +2657,14 @@
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargeObjectChunk* chunk = first_chunk_;
+  for (LargePage* chunk = first_page_;
        chunk != NULL;
-       chunk = chunk->next()) {
+       chunk = chunk->next_page()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
     HeapObject* object = chunk->GetObject();
     Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->ObjectAreaStart());
+    ASSERT(object->address() == page->area_start());
 
     // The first word should be a map, and we expect all map pointers to be
     // in map space.
@@ -3015,9 +2689,6 @@
                           object->Size(),
                           &code_visitor);
     } else if (object->IsFixedArray()) {
-      // We loop over fixed arrays ourselves, rather then using the visitor,
-      // because the visitor doesn't support the start/offset iteration
-      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -3025,13 +2696,6 @@
           HeapObject* element_object = HeapObject::cast(element);
           ASSERT(heap()->Contains(element_object));
           ASSERT(element_object->map()->IsMap());
-          if (heap()->InNewSpace(element_object)) {
-            Address array_addr = object->address();
-            Address element_addr = array_addr + FixedArray::kHeaderSize +
-                j * kPointerSize;
-
-            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
-          }
         }
       }
     }
@@ -3041,7 +2705,7 @@
 
 void LargeObjectSpace::Print() {
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     obj->Print();
   }
 }
@@ -3052,7 +2716,7 @@
   int num_objects = 0;
   ClearHistograms();
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     num_objects++;
     CollectHistogramInfo(obj);
   }
@@ -3066,13 +2730,38 @@
 void LargeObjectSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
+
+
+void Page::Print() {
+  // Make a best effort to print the objects in the page.
+  PrintF("Page@%p in %s\n",
+         this->address(),
+         AllocationSpaceName(this->owner()->identity()));
+  printf(" --------------------------------------\n");
+  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+  unsigned mark_size = 0;
+  for (HeapObject* object = objects.Next();
+       object != NULL;
+       object = objects.Next()) {
+    bool is_marked = Marking::MarkBitFrom(object).Get();
+    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
+    if (is_marked) {
+      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+    }
+    object->ShortPrint();
+    PrintF("\n");
+  }
+  printf(" --------------------------------------\n");
+  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
+}
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
index f156496..0ca8c39 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -49,45 +49,47 @@
 //
 // The semispaces of the young generation are contiguous.  The old and map
 // spaces consists of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
-// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
+// area.
 //
 // There is a separate large object space for objects larger than
 // Page::kMaxHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
+// may be larger than the page size.
 //
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references.  See store-buffer.h.
 //
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
 //
-// To enable lazy cleaning of old space pages we use a notion of allocation
-// watermark. Every pointer under watermark is considered to be well formed.
-// Page allocation watermark is not necessarily equal to page allocation top but
-// all alive objects on page should reside under allocation watermark.
-// During scavenge allocation watermark might be bumped and invalid pointers
-// might appear below it. To avoid following them we store a valid watermark
-// into special field in the page header and set a page WATERMARK_INVALIDATED
-// flag. For details see comments in the Page::SetAllocationWatermark() method
-// body.
+// To enable lazy cleaning of old space pages we can mark chunks of the page
+// as being garbage.  Garbage sections are marked with a special map.  These
+// sections are skipped when scanning the page, even if we are otherwise
+// scanning without regard for object boundaries.  Garbage sections are chained
+// together to form a free list after a GC.  Garbage sections created outside
+// of GCs by object truncation etc. may not be in the free list chain.  Very
+// small free spaces are ignored, they need only be cleaned of bogus pointers
+// into new space.
 //
+// Each page may have up to one special garbage section.  The start of this
+// section is denoted by the top field in the space.  The end of the section
+// is denoted by the limit field in the space.  This special garbage section
+// is not marked with a free space map in the data.  The point of this section
+// is to enable linear allocation without having to constantly update the byte
+// array every time the top field is updated and a new object is created.  The
+// special garbage section is not in the chain of garbage sections.
+//
+// Since the top and limit fields are in the space, not the page, only one page
+// has a special garbage section, and if the top and limit are equal then there
+// is no special garbage section.
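
(editor's note) As an illustration of the top/limit scheme just described, a
bump-pointer fast path over the special garbage section might look like the
following sketch (assuming the AllocationInfo {top, limit} pair used by the
paged spaces; this is not code from the patch):

    static HeapObject* TryLinearAllocate(AllocationInfo* info, int size) {
      Address top = info->top;
      if (info->limit - top < size) return NULL;  // section exhausted
      info->top = top + size;  // no free-space map is written for this gap
      return HeapObject::FromAddress(top);
    }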
 
 // Some assertion macros used in the debugging mode.
 
@@ -101,7 +103,7 @@
   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
 
 #define ASSERT_OBJECT_SIZE(size)                                               \
-  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
 
 #define ASSERT_PAGE_OFFSET(offset)                                             \
   ASSERT((Page::kObjectStartOffset <= offset)                                  \
@@ -114,30 +116,532 @@
 class PagedSpace;
 class MemoryAllocator;
 class AllocationInfo;
+class Space;
+class FreeList;
+class MemoryChunk;
+
+class MarkBit {
+ public:
+  typedef uint32_t CellType;
+
+  inline MarkBit(CellType* cell, CellType mask, bool data_only)
+      : cell_(cell), mask_(mask), data_only_(data_only) { }
+
+  inline CellType* cell() { return cell_; }
+  inline CellType mask() { return mask_; }
+
+#ifdef DEBUG
+  bool operator==(const MarkBit& other) {
+    return cell_ == other.cell_ && mask_ == other.mask_;
+  }
+#endif
+
+  inline void Set() { *cell_ |= mask_; }
+  inline bool Get() { return (*cell_ & mask_) != 0; }
+  inline void Clear() { *cell_ &= ~mask_; }
+
+  inline bool data_only() { return data_only_; }
+
+  inline MarkBit Next() {
+    CellType new_mask = mask_ << 1;
+    if (new_mask == 0) {
+      return MarkBit(cell_ + 1, 1, data_only_);
+    } else {
+      return MarkBit(cell_, new_mask, data_only_);
+    }
+  }
+
+ private:
+  CellType* cell_;
+  CellType mask_;
+  // This boolean indicates that the object is in a data-only space with no
+  // pointers.  This enables some optimizations when marking.
+  // It is expected that this field is inlined and turned into control flow
+  // at the place where the MarkBit object is created.
+  bool data_only_;
+};
+
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+  static const uint32_t kBitsPerCell = 32;
+  static const uint32_t kBitsPerCellLog2 = 5;
+  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+  static const size_t kLength =
+    (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+  static const size_t kSize =
+    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
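+
+  // (editor) kLength is one mark bit per pointer-aligned word in the page;
+  // kSize is that same quantity expressed in bytes of bitmap storage.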
+
+
+  static int CellsForLength(int length) {
+    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+  }
+
+  int CellsCount() {
+    return CellsForLength(kLength);
+  }
+
+  static int SizeFor(int cells_count) {
+    return sizeof(MarkBit::CellType) * cells_count;
+  }
+
+  INLINE(static uint32_t IndexToCell(uint32_t index)) {
+    return index >> kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellToIndex(uint32_t index)) {
+    return index << kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+    return (index + kBitIndexMask) & ~kBitIndexMask;
+  }
+
+  INLINE(MarkBit::CellType* cells()) {
+    return reinterpret_cast<MarkBit::CellType*>(this);
+  }
+
+  INLINE(Address address()) {
+    return reinterpret_cast<Address>(this);
+  }
+
+  INLINE(static Bitmap* FromAddress(Address addr)) {
+    return reinterpret_cast<Bitmap*>(addr);
+  }
+
+  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
+    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+    return MarkBit(cell, mask, data_only);
+  }
+
+  static inline void Clear(MemoryChunk* chunk);
+
+  static void PrintWord(uint32_t word, uint32_t himask = 0) {
+    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+      if ((mask & himask) != 0) PrintF("[");
+      PrintF((mask & word) ? "1" : "0");
+      if ((mask & himask) != 0) PrintF("]");
+    }
+  }
+
+  class CellPrinter {
+   public:
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+
+    void Print(uint32_t pos, uint32_t cell) {
+      if (cell == seq_type) {
+        seq_length++;
+        return;
+      }
+
+      Flush();
+
+      if (IsSeq(cell)) {
+        seq_start = pos;
+        seq_length = 0;
+        seq_type = cell;
+        return;
+      }
+
+      PrintF("%d: ", pos);
+      PrintWord(cell);
+      PrintF("\n");
+    }
+
+    void Flush() {
+      if (seq_length > 0) {
+        PrintF("%d: %dx%d\n",
+               seq_start,
+               seq_type == 0 ? 0 : 1,
+               seq_length * kBitsPerCell);
+        seq_length = 0;
+      }
+    }
+
+    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+   private:
+    uint32_t seq_start;
+    uint32_t seq_type;
+    uint32_t seq_length;
+  };
+
+  void Print() {
+    CellPrinter printer;
+    for (int i = 0; i < CellsCount(); i++) {
+      printer.Print(i, cells()[i]);
+    }
+    printer.Flush();
+    PrintF("\n");
+  }
+
+  bool IsClean() {
+    for (int i = 0; i < CellsCount(); i++) {
+      if (cells()[i] != 0) return false;
+    }
+    return true;
+  }
+};
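
(editor's note) A sketch of how the pieces above compose to locate the mark
bit for an object address; this is essentially what the Marking::MarkBitFrom()
calls in spaces.cc above must do:

    static MarkBit MarkBitForAddress(Address addr) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
      uint32_t index = chunk->AddressToMarkbitIndex(addr);
      return chunk->markbits()->MarkBitFromIndex(index);
    }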
+
+
+class SkipList;
+class SlotsBuffer;
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk {
+ public:
+  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+  static MemoryChunk* FromAddress(Address a) {
+    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+  }
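+  // (editor's illustration) With the 1MB kAlignment defined below, e.g.
+  // address 0x40012345 masks back to the chunk header at 0x40000000.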
+
+  // Only works for addresses in pointer spaces, not data or code spaces.
+  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  bool is_valid() { return address() != NULL; }
+
+  MemoryChunk* next_chunk() const { return next_chunk_; }
+  MemoryChunk* prev_chunk() const { return prev_chunk_; }
+
+  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
+  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+
+  Space* owner() const {
+    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+        kFailureTag) {
+      return reinterpret_cast<Space*>(owner_ - kFailureTag);
+    } else {
+      return NULL;
+    }
+  }
+
+  void set_owner(Space* space) {
+    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
+    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
+    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+           kFailureTag);
+  }
+
+  VirtualMemory* reserved_memory() {
+    return &reservation_;
+  }
+
+  void InitializeReservedMemory() {
+    reservation_.Reset();
+  }
+
+  void set_reserved_memory(VirtualMemory* reservation) {
+    ASSERT_NOT_NULL(reservation);
+    reservation_.TakeControl(reservation);
+  }
+
+  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+  void initialize_scan_on_scavenge(bool scan) {
+    if (scan) {
+      SetFlag(SCAN_ON_SCAVENGE);
+    } else {
+      ClearFlag(SCAN_ON_SCAVENGE);
+    }
+  }
+  inline void set_scan_on_scavenge(bool scan);
+
+  int store_buffer_counter() { return store_buffer_counter_; }
+  void set_store_buffer_counter(int counter) {
+    store_buffer_counter_ = counter;
+  }
+
+  bool Contains(Address addr) {
+    return addr >= area_start() && addr < area_end();
+  }
+
+  // Checks whether addr can be a limit of addresses in this page.
+  // It's a limit if it's in the page, or if it's just after the
+  // last byte of the page.
+  bool ContainsLimit(Address addr) {
+    return addr >= area_start() && addr <= area_end();
+  }
+
+  enum MemoryChunkFlags {
+    IS_EXECUTABLE,
+    ABOUT_TO_BE_FREED,
+    POINTERS_TO_HERE_ARE_INTERESTING,
+    POINTERS_FROM_HERE_ARE_INTERESTING,
+    SCAN_ON_SCAVENGE,
+    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+    NEW_SPACE_BELOW_AGE_MARK,
+    CONTAINS_ONLY_DATA,
+    EVACUATION_CANDIDATE,
+    RESCAN_ON_EVACUATION,
+
+    // Pages swept precisely can be iterated, hitting only the live objects,
+    // whereas those swept conservatively cannot be iterated over. Both flags
+    // indicate that marking bits have been cleared by the sweeper, otherwise
+    // marking bits are still intact.
+    WAS_SWEPT_PRECISELY,
+    WAS_SWEPT_CONSERVATIVELY,
+
+    // Last flag, keep at bottom.
+    NUM_MEMORY_CHUNK_FLAGS
+  };
+
+
+  static const int kPointersToHereAreInterestingMask =
+      1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask =
+      1 << EVACUATION_CANDIDATE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      (1 << EVACUATION_CANDIDATE) |
+      (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) |
+      (1 << IN_TO_SPACE);
+
+
+  void SetFlag(int flag) {
+    flags_ |= static_cast<uintptr_t>(1) << flag;
+  }
+
+  void ClearFlag(int flag) {
+    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
+  }
+
+  void SetFlagTo(int flag, bool value) {
+    if (value) {
+      SetFlag(flag);
+    } else {
+      ClearFlag(flag);
+    }
+  }
+
+  bool IsFlagSet(int flag) {
+    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+  }
+
+  // Set or clear multiple flags at a time. The flags in the mask
+  // are set to the value in "flags", the rest retain the current value
+  // in flags_.
+  void SetFlags(intptr_t flags, intptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  intptr_t GetFlags() { return flags_; }
+
+  // Manage live byte count (count of bytes known to be live,
+  // because they are marked black).
+  void ResetLiveBytes() {
+    if (FLAG_gc_verbose) {
+      PrintF("ResetLiveBytes:%p:%x->0\n",
+             static_cast<void*>(this), live_byte_count_);
+    }
+    live_byte_count_ = 0;
+  }
+  void IncrementLiveBytes(int by) {
+    if (FLAG_gc_verbose) {
+      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
+             static_cast<void*>(this), live_byte_count_,
+             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+             live_byte_count_ + by);
+    }
+    live_byte_count_ += by;
+    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
+  }
+  int LiveBytes() {
+    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
+    return live_byte_count_;
+  }
+  static void IncrementLiveBytes(Address address, int by) {
+    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+  }
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+
+  static const intptr_t kLiveBytesOffset =
+     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize + kPointerSize + kIntSize;
+
+  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
+
+  static const size_t kHeaderSize =
+      kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+  static const int kBodyOffset =
+    CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.  Also aligned to 32 words because
+  // the marking bitmap is arranged in 32 bit chunks.
+  static const int kObjectStartAlignment = 32 * kPointerSize;
+  static const int kObjectStartOffset = kBodyOffset - 1 +
+      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
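+  // (editor) Equivalent to rounding kBodyOffset up to a multiple of
+  // kObjectStartAlignment.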
+
+  size_t size() const { return size_; }
+
+  void set_size(size_t size) {
+    size_ = size;
+  }
+
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
+
+  bool ContainsOnlyData() {
+    return IsFlagSet(CONTAINS_ONLY_DATA);
+  }
+
+  bool InNewSpace() {
+    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+  }
+
+  bool InToSpace() {
+    return IsFlagSet(IN_TO_SPACE);
+  }
+
+  bool InFromSpace() {
+    return IsFlagSet(IN_FROM_SPACE);
+  }
+
+  // ---------------------------------------------------------------------
+  // Markbits support
+
+  inline Bitmap* markbits() {
+    return Bitmap::FromAddress(address() + kHeaderSize);
+  }
+
+  void PrintMarkbits() { markbits()->Print(); }
+
+  inline uint32_t AddressToMarkbitIndex(Address addr) {
+    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+  }
+
+  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
+    const intptr_t offset =
+        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+  }
+
+  inline Address MarkbitIndexToAddress(uint32_t index) {
+    return this->address() + (index << kPointerSizeLog2);
+  }
+
+  void InsertAfter(MemoryChunk* other);
+  void Unlink();
+
+  inline Heap* heap() { return heap_; }
+
+  static const int kFlagsOffset = kPointerSize * 3;
+
+  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+
+  bool ShouldSkipEvacuationSlotRecording() {
+    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+  }
+
+  inline SkipList* skip_list() {
+    return skip_list_;
+  }
+
+  inline void set_skip_list(SkipList* skip_list) {
+    skip_list_ = skip_list;
+  }
+
+  inline SlotsBuffer* slots_buffer() {
+    return slots_buffer_;
+  }
+
+  inline SlotsBuffer** slots_buffer_address() {
+    return &slots_buffer_;
+  }
+
+  void MarkEvacuationCandidate() {
+    ASSERT(slots_buffer_ == NULL);
+    SetFlag(EVACUATION_CANDIDATE);
+  }
+
+  void ClearEvacuationCandidate() {
+    ASSERT(slots_buffer_ == NULL);
+    ClearFlag(EVACUATION_CANDIDATE);
+  }
+
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() {
+    return static_cast<int>(area_end() - area_start());
+  }
+
+ protected:
+  MemoryChunk* next_chunk_;
+  MemoryChunk* prev_chunk_;
+  size_t size_;
+  intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
+  // If the chunk needs to remember its memory reservation, it is stored here.
+  VirtualMemory reservation_;
+  // The identity of the owning space.  This is tagged as a failure pointer, but
+  // no failure can be in an object, so this can be distinguished from any entry
+  // in a fixed array.
+  Address owner_;
+  Heap* heap_;
+  // Used by the store buffer to keep track of which pages to mark scan-on-
+  // scavenge.
+  int store_buffer_counter_;
+  // Count of bytes marked black on page.
+  int live_byte_count_;
+  SlotsBuffer* slots_buffer_;
+  SkipList* skip_list_;
+
+  static MemoryChunk* Initialize(Heap* heap,
+                                 Address base,
+                                 size_t size,
+                                 Address area_start,
+                                 Address area_end,
+                                 Executability executable,
+                                 Space* owner);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
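
(editor's note) The owner_ encoding above relies on the failure tag occupying
the low two bits (kFailureTag == 3 and kFailureTagMask == 3 in this version's
globals.h). Sketch of the round trip:

    chunk->set_owner(space);    // stores space + kFailureTag; low bits are 11
    Space* s = chunk->owner();  // sees the 11 tag, returns owner_ - kFailureTag
    // Heap object pointers carry tag 01 and smis tag 0, so a raw word scan
    // over the header cannot mistake owner_ for a live object pointer.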
 
 // -----------------------------------------------------------------------------
-// A page normally has 8K bytes. Large object pages may be larger.  A page
-// address is always aligned to the 8K page size.
-//
-// Each page starts with a header of Page::kPageHeaderSize size which contains
-// bookkeeping data.
-//
-// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The exact encoding is described in the comments for
-// class MapWord in objects.h.
+// A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
 //   Page* p = Page::FromAllocationTop(top);
-class Page {
+class Page : public MemoryChunk {
  public:
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[
-  //
-  // Note that this function only works for addresses in normal paged
-  // spaces and addresses in the first 8K of large object pages (i.e.,
-  // the start of large objects but not necessarily derived pointers
-  // within them).
+  // This only works if the object is in fact in a page.  See also MemoryChunk::
+  // FromAddress() and FromAnyPointerAddress().
   INLINE(static Page* FromAddress(Address a)) {
     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
   }
@@ -148,66 +652,23 @@
   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
   INLINE(static Page* FromAllocationTop(Address top)) {
     Page* p = FromAddress(top - kPointerSize);
-    ASSERT_PAGE_OFFSET(p->Offset(top));
     return p;
   }
 
-  // Returns the start address of this page.
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  // Checks whether this is a valid page address.
-  bool is_valid() { return address() != NULL; }
-
-  // Returns the next page of this page.
+  // Returns the next page in the chain of pages owned by a space.
   inline Page* next_page();
-
-  // Return the end of allocation in this page. Undefined for unused pages.
-  inline Address AllocationTop();
-
-  // Return the allocation watermark for the page.
-  // For old space pages it is guaranteed that the area under the watermark
-  // does not contain any garbage pointers to new space.
-  inline Address AllocationWatermark();
-
-  // Return the allocation watermark offset from the beginning of the page.
-  inline uint32_t AllocationWatermarkOffset();
-
-  inline void SetAllocationWatermark(Address allocation_watermark);
-
-  inline void SetCachedAllocationWatermark(Address allocation_watermark);
-  inline Address CachedAllocationWatermark();
-
-  // Returns the start address of the object area in this page.
-  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
-
-  // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+  inline Page* prev_page();
+  inline void set_next_page(Page* page);
+  inline void set_prev_page(Page* page);
 
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
-  // True if this page was in use before current compaction started.
-  // Result is valid only for pages owned by paged spaces and
-  // only after PagedSpace::PrepareForMarkCompact was called.
-  inline bool WasInUseBeforeMC();
-
-  inline void SetWasInUseBeforeMC(bool was_in_use);
-
-  // True if this page is a large object page.
-  inline bool IsLargeObjectPage();
-
-  inline void SetIsLargeObjectPage(bool is_large_object_page);
-
-  inline Executability PageExecutability();
-
-  inline void SetPageExecutability(Executability executable);
-
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
-    ASSERT_PAGE_OFFSET(offset);
     return offset;
   }
 
@@ -218,144 +679,70 @@
   }
 
   // ---------------------------------------------------------------------
-  // Card marking support
-
-  static const uint32_t kAllRegionsCleanMarks = 0x0;
-  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
-
-  inline uint32_t GetRegionMarks();
-  inline void SetRegionMarks(uint32_t dirty);
-
-  inline uint32_t GetRegionMaskForAddress(Address addr);
-  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
-  inline int GetRegionNumberForAddress(Address addr);
-
-  inline void MarkRegionDirty(Address addr);
-  inline bool IsRegionDirty(Address addr);
-
-  inline void ClearRegionMarks(Address start,
-                               Address end,
-                               bool reaches_limit);
 
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
+  // Object area size in bytes.
+  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
+
+  // Maximum object size that fits in a page.
+  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
+
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
-  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
-    kIntSize + kPointerSize + kPointerSize;
-
-  // The start offset of the object area in a page. Aligned to both maps and
-  // code alignment to be suitable for both.
-  static const int kObjectStartOffset =
-      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
-
-  // Object area size in bytes.
-  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
-
-  // Maximum object size that fits in a page.
-  static const int kMaxHeapObjectSize = kObjectAreaSize;
-
-  static const int kDirtyFlagOffset = 2 * kPointerSize;
-  static const int kRegionSizeLog2 = 8;
-  static const int kRegionSize = 1 << kRegionSizeLog2;
-  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
-
-  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
-
-  enum PageFlag {
-    IS_NORMAL_PAGE = 0,
-    WAS_IN_USE_BEFORE_MC,
-
-    // Page allocation watermark was bumped by preallocation during scavenge.
-    // Correct watermark can be retrieved by CachedAllocationWatermark() method
-    WATERMARK_INVALIDATED,
-    IS_EXECUTABLE,
-    NUM_PAGE_FLAGS  // Must be last
-  };
-  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
-
-  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
-  // scavenge we just invalidate the watermark on each old space page after
-  // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
-  // flag at the beginning of the next scavenge and each page becomes marked as
-  // having a valid watermark.
-  //
-  // The following invariant must hold for pages in old pointer and map spaces:
-  //     If page is in use then page is marked as having invalid watermark at
-  //     the beginning and at the end of any GC.
-  //
-  // This invariant guarantees that after flipping flag meaning at the
-  // beginning of scavenge all pages in use will be marked as having valid
-  // watermark.
-  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
-
-  // Returns true if the page allocation watermark was not altered during
-  // scavenge.
-  inline bool IsWatermarkValid();
-
-  inline void InvalidateWatermark(bool value);
-
-  inline bool GetPageFlag(PageFlag flag);
-  inline void SetPageFlag(PageFlag flag, bool value);
-  inline void ClearPageFlags();
-
   inline void ClearGCFields();
 
-  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
-  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
-  static const uint32_t kAllocationWatermarkOffsetMask =
-      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
-      kAllocationWatermarkOffsetShift;
+  static inline Page* Initialize(Heap* heap,
+                                 MemoryChunk* chunk,
+                                 Executability executable,
+                                 PagedSpace* owner);
 
-  static const uint32_t kFlagsMask =
-    ((1 << kAllocationWatermarkOffsetShift) - 1);
+  void InitializeAsAnchor(PagedSpace* owner);
 
-  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
-               kAllocationWatermarkOffsetBits);
+  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
+  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
+  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
 
-  //---------------------------------------------------------------------------
-  // Page header description.
-  //
-  // If a page is not in the large object space, the first word,
-  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
-  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
-  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
-  // or [next_page_start, next_page_end[. It cannot point to a valid address
-  // in the current page.  If a page is in the large object space, the first
-  // word *may* (if the page start and large object chunk start are the
-  // same) contain the address of the next large object chunk.
-  intptr_t opaque_header;
+  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
+  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
 
-  // If the page is not in the large object space, the low-order bit of the
-  // second word is set. If the page is in the large object space, the
-  // second word *may* (if the page start and large object chunk start are
-  // the same) contain the large object chunk size.  In either case, the
-  // low-order bit for large object pages will be cleared.
-  // For normal pages this word is used to store page flags and
-  // offset of allocation top.
-  intptr_t flags_;
+  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
+  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
 
-  // This field contains dirty marks for regions covering the page. Only dirty
-  // regions might contain intergenerational references.
-  // Only 32 dirty marks are supported so for large object pages several regions
-  // might be mapped to a single dirty mark.
-  uint32_t dirty_regions_;
+#ifdef DEBUG
+  void Print();
+#endif  // DEBUG
 
-  // The index of the page in its owner space.
-  int mc_page_index;
-
-  // During mark-compact collections this field contains the forwarding address
-  // of the first live object in this page.
-  // During scavenge collection this field is used to store allocation watermark
-  // if it is altered during scavenge.
-  Address mc_first_forwarded;
-
-  Heap* heap_;
+  friend class MemoryAllocator;
 };
 
 
+STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
+
+
+class LargePage : public MemoryChunk {
+ public:
+  HeapObject* GetObject() {
+    return HeapObject::FromAddress(area_start());
+  }
+
+  inline LargePage* next_page() const {
+    return static_cast<LargePage*>(next_chunk());
+  }
+
+  inline void set_next_page(LargePage* page) {
+    set_next_chunk(page);
+  }
+ private:
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+
 // ----------------------------------------------------------------------------
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
@@ -380,6 +767,14 @@
   // (e.g. see LargeObjectSpace).
   virtual intptr_t SizeOfObjects() { return Size(); }
 
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (id_ == CODE_SPACE) {
+      return RoundDown(size, kCodeAlignment);
+    } else {
+      return RoundDown(size, kPointerSize);
+    }
+  }
+
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -430,9 +825,9 @@
   // Allocates a chunk of memory from the large-object portion of
   // the code range.  On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
-                                          size_t* allocated);
-  void FreeRawMemory(void* buf, size_t length);
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+                                            size_t* allocated);
+  void FreeRawMemory(Address buf, size_t length);
 
  private:
   Isolate* isolate_;
@@ -443,9 +838,15 @@
   class FreeBlock {
    public:
     FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {}
+        : start(start_arg), size(size_arg) {
+      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+    }
     FreeBlock(void* start_arg, size_t size_arg)
-        : start(static_cast<Address>(start_arg)), size(size_arg) {}
+        : start(static_cast<Address>(start_arg)), size(size_arg) {
+      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+    }
 
     Address start;
     size_t size;
@@ -473,30 +874,63 @@
 };
 
 
+class SkipList {
+ public:
+  SkipList() {
+    Clear();
+  }
+
+  void Clear() {
+    for (int idx = 0; idx < kSize; idx++) {
+      starts_[idx] = reinterpret_cast<Address>(-1);
+    }
+  }
+
+  Address StartFor(Address addr) {
+    return starts_[RegionNumber(addr)];
+  }
+
+  void AddObject(Address addr, int size) {
+    int start_region = RegionNumber(addr);
+    int end_region = RegionNumber(addr + size - kPointerSize);
+    for (int idx = start_region; idx <= end_region; idx++) {
+      if (starts_[idx] > addr) starts_[idx] = addr;
+    }
+  }
+
+  static inline int RegionNumber(Address addr) {
+    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+  }
+
+  static void Update(Address addr, int size) {
+    Page* page = Page::FromAddress(addr);
+    SkipList* list = page->skip_list();
+    if (list == NULL) {
+      list = new SkipList();
+      page->set_skip_list(list);
+    }
+
+    list->AddObject(addr, size);
+  }
+
+ private:
+  static const int kRegionSizeLog2 = 13;
+  static const int kRegionSize = 1 << kRegionSizeLog2;
+  static const int kSize = Page::kPageSize / kRegionSize;
+
+  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
+
+  Address starts_[kSize];
+};
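
(editor's note) One plausible consumer of the skip list, sketched here rather
than taken from this patch: finding a safe place to start scanning at or
before an arbitrary address. Entries are initialized to Address(-1) by
Clear(), hence the fallback to the page's area start:

    static Address ScanStartFor(Page* page, Address addr) {
      SkipList* list = page->skip_list();
      if (list == NULL) return page->area_start();
      Address start = list->StartFor(addr);
      if (start == reinterpret_cast<Address>(-1)) return page->area_start();
      return start;
    }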
+
+
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
-// allocator manages chunks for the paged heap spaces (old space and map
-// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
-// addresses and are linked as a list.
+// allocator allocates and deallocates pages for the paged heap spaces and large
+// pages for large object space.
 //
-// The allocator keeps an initial chunk which is used for the new space.  The
-// leftover regions of the initial chunk are used for the initial chunks of
-// old space and map space if they are big enough to hold at least one page.
-// The allocator assumes that there is one old space and one map space, each
-// expands the space by allocating kPagesPerChunk pages except the last
-// expansion (before running out of space).  The first chunk may contain fewer
-// than kPagesPerChunk pages as well.
+// Each space has to manage its own pages.
 //
-// The memory allocator also allocates chunks for the large object space, but
-// they are managed by the space itself.  The new space does not expand.
-//
-// The fact that pages for paged spaces are allocated and deallocated in chunks
-// induces a constraint on the order of pages in a linked lists. We say that
-// pages are linked in the chunk-order if and only if every two consecutive
-// pages from the same chunk are consecutive in the linked list.
-//
-
-
 class MemoryAllocator {
  public:
   explicit MemoryAllocator(Isolate* isolate);
@@ -505,91 +939,15 @@
   // Max capacity of the total space and executable memory limit.
   bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
 
-  // Deletes valid chunks.
   void TearDown();
 
-  // Reserves an initial address range of virtual memory to be split between
-  // the two new space semispaces, the old space, and the map space.  The
-  // memory is not yet committed or assigned to spaces and split into pages.
-  // The initial chunk is unmapped when the memory allocator is torn down.
-  // This function should only be called when there is not already a reserved
-  // initial chunk (initial_chunk_ should be NULL).  It returns the start
-  // address of the initial chunk if successful, with the side effect of
-  // setting the initial chunk, or else NULL if unsuccessful and leaves the
-  // initial chunk NULL.
-  void* ReserveInitialChunk(const size_t requested);
+  Page* AllocatePage(PagedSpace* owner, Executability executable);
 
-  // Commits pages from an as-yet-unmanaged block of virtual memory into a
-  // paged space.  The block should be part of the initial chunk reserved via
-  // a call to ReserveInitialChunk.  The number of pages is always returned in
-  // the output parameter num_pages.  This function assumes that the start
-  // address is non-null and that it is big enough to hold at least one
-  // page-aligned page.  The call always succeeds, and num_pages is always
-  // greater than zero.
-  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
-                    int* num_pages);
+  LargePage* AllocateLargePage(intptr_t object_size,
+                               Executability executable,
+                               Space* owner);
 
-  // Commit a contiguous block of memory from the initial chunk.  Assumes that
-  // the address is not NULL, the size is greater than zero, and that the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool CommitBlock(Address start, size_t size, Executability executable);
-
-  // Uncommit a contiguous block of memory [start..(start+size)[.
-  // start is not NULL, the size is greater than zero, and the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool UncommitBlock(Address start, size_t size);
-
-  // Zaps a contiguous block of memory [start..(start+size)[ thus
-  // filling it up with a recognizable non-NULL bit pattern.
-  void ZapBlock(Address start, size_t size);
-
-  // Attempts to allocate the requested (non-zero) number of pages from the
-  // OS.  Fewer pages might be allocated than requested. If it fails to
-  // allocate memory for the OS or cannot allocate a single page, this
-  // function returns an invalid page pointer (NULL). The caller must check
-  // whether the returned page is valid (by calling Page::is_valid()).  It is
-  // guaranteed that allocated pages have contiguous addresses.  The actual
-  // number of allocated pages is returned in the output parameter
-  // allocated_pages.  If the PagedSpace owner is executable and there is
-  // a code range, the pages are allocated from the code range.
-  Page* AllocatePages(int requested_pages, int* allocated_pages,
-                      PagedSpace* owner);
-
-  // Frees pages from a given page and after. Requires pages to be
-  // linked in chunk-order (see comment for class).
-  // If 'p' is the first page of a chunk, pages from 'p' are freed
-  // and this function returns an invalid page pointer.
-  // Otherwise, the function searches a page after 'p' that is
-  // the first page of a chunk. Pages after the found page
-  // are freed and the function returns 'p'.
-  Page* FreePages(Page* p);
-
-  // Frees all pages owned by given space.
-  void FreeAllPages(PagedSpace* space);
-
-  // Allocates and frees raw memory of certain size.
-  // These are just thin wrappers around OS::Allocate and OS::Free,
-  // but keep track of allocated bytes as part of heap.
-  // If the flag is EXECUTABLE and a code range exists, the requested
-  // memory is allocated from the code range.  If a code range exists
-  // and the freed memory is in it, the code range manages the freed memory.
-  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
-                                          size_t* allocated,
-                                          Executability executable);
-  void FreeRawMemory(void* buf,
-                     size_t length,
-                     Executability executable);
-  void PerformAllocationCallback(ObjectSpace space,
-                                 AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                   ObjectSpace space,
-                                   AllocationAction action);
-  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+  void Free(MemoryChunk* chunk);
 
   // Returns the maximum available bytes of heaps.
   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -608,70 +966,85 @@
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }
 
-  // Links two pages.
-  inline void SetNextPage(Page* prev, Page* next);
-
-  // Returns the next page of a given page.
-  inline Page* GetNextPage(Page* p);
-
-  // Checks whether a page belongs to a space.
-  inline bool IsPageInSpace(Page* p, PagedSpace* space);
-
-  // Returns the space that owns the given page.
-  inline PagedSpace* PageOwner(Page* page);
-
-  // Finds the first/last page in the same chunk as a given page.
-  Page* FindFirstPageInSameChunk(Page* p);
-  Page* FindLastPageInSameChunk(Page* p);
-
-  // Relinks list of pages owned by space to make it chunk-ordered.
-  // Returns new first and last pages of space.
-  // Also returns last page in relinked list which has WasInUsedBeforeMC
-  // flag set.
-  void RelinkPageListInChunkOrder(PagedSpace* space,
-                                  Page** first_page,
-                                  Page** last_page,
-                                  Page** last_page_in_use);
-
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
 #endif
 
-  // Due to encoding limitation, we can only have 8K chunks.
-  static const int kMaxNofChunks = 1 << kPageSizeBits;
-  // If a chunk has at least 16 pages, the maximum heap size is about
-  // 8K * 8K * 16 = 1G bytes.
-#ifdef V8_TARGET_ARCH_X64
-  static const int kPagesPerChunk = 32;
-  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
-  static const int kChunkTableLevels = 4;
-  static const int kChunkTableBitsPerLevel = 12;
-#else
-  static const int kPagesPerChunk = 16;
-  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
-  static const int kChunkTableLevels = 2;
-  static const int kChunkTableBitsPerLevel = 8;
-#endif
+  MemoryChunk* AllocateChunk(intptr_t body_size,
+                             Executability executable,
+                             Space* space);
+
+  Address ReserveAlignedMemory(size_t requested,
+                               size_t alignment,
+                               VirtualMemory* controller);
+  Address AllocateAlignedMemory(size_t requested,
+                                size_t alignment,
+                                Executability executable,
+                                VirtualMemory* controller);
+
+  void FreeMemory(VirtualMemory* reservation, Executability executable);
+  void FreeMemory(Address addr, size_t size, Executability executable);
+
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool CommitBlock(Address start, size_t size, Executability executable);
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.
+  // start is not NULL, the size is greater than zero, and the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool UncommitBlock(Address start, size_t size);
+
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  void ZapBlock(Address start, size_t size);
+
+  void PerformAllocationCallback(ObjectSpace space,
+                                 AllocationAction action,
+                                 size_t size);
+
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space,
+                                   AllocationAction action);
+
+  void RemoveMemoryAllocationCallback(
+      MemoryAllocationCallback callback);
+
+  bool MemoryAllocationCallbackRegistered(
+      MemoryAllocationCallback callback);
+
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
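+
+  // (editor's sketch) Together the offsets above describe a code-page layout
+  // of  | header | guard | executable area | guard |, with the usable area
+  // bounded by CodePageAreaStartOffset()/CodePageAreaEndOffset(); see
+  // CommitCodePage() in spaces.cc for the authoritative arrangement.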
 
  private:
-  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
-
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  intptr_t capacity_;
+  size_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
+  size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  intptr_t size_;
-
+  size_t size_;
   // Allocated executable space size in bytes.
-  intptr_t size_executable_;
+  size_t size_executable_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -683,64 +1056,11 @@
     ObjectSpace space;
     AllocationAction action;
   };
+
   // A List of callback that are triggered when memory is allocated or free'd
   List<MemoryAllocationCallbackRegistration>
       memory_allocation_callbacks_;
 
-  // The initial chunk of virtual memory.
-  VirtualMemory* initial_chunk_;
-
-  // Allocated chunk info: chunk start address, chunk size, and owning space.
-  class ChunkInfo BASE_EMBEDDED {
-   public:
-    ChunkInfo() : address_(NULL),
-                  size_(0),
-                  owner_(NULL),
-                  executable_(NOT_EXECUTABLE),
-                  owner_identity_(FIRST_SPACE) {}
-    inline void init(Address a, size_t s, PagedSpace* o);
-    Address address() { return address_; }
-    size_t size() { return size_; }
-    PagedSpace* owner() { return owner_; }
-    // We save executability of the owner to allow using it
-    // when collecting stats after the owner has been destroyed.
-    Executability executable() const { return executable_; }
-    AllocationSpace owner_identity() const { return owner_identity_; }
-
-   private:
-    Address address_;
-    size_t size_;
-    PagedSpace* owner_;
-    Executability executable_;
-    AllocationSpace owner_identity_;
-  };
-
-  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
-  List<ChunkInfo> chunks_;
-  List<int> free_chunk_ids_;
-  int max_nof_chunks_;
-  int top_;
-
-  // Push/pop a free chunk id onto/from the stack.
-  void Push(int free_chunk_id);
-  int Pop();
-  bool OutOfChunkIds() { return top_ == 0; }
-
-  // Frees a chunk.
-  void DeleteChunk(int chunk_id);
-
-  // Basic check whether a chunk id is in the valid range.
-  inline bool IsValidChunkId(int chunk_id);
-
-  // Checks whether a chunk id identifies an allocated chunk.
-  inline bool IsValidChunk(int chunk_id);
-
-  // Returns the chunk id that a page belongs to.
-  inline int GetChunkId(Page* p);
-
-  // True if the address lies in the initial chunk.
-  inline bool InInitialChunk(Address address);
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -748,13 +1068,7 @@
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);
 
-  Page* RelinkPagesInChunk(int chunk_id,
-                           Address chunk_start,
-                           size_t chunk_size,
-                           Page* prev,
-                           Page** last_page_in_use);
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
 
@@ -777,111 +1091,67 @@
 // -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
 //
-// A HeapObjectIterator iterates objects from a given address to the
-// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
 //
-// (1) If the space top changes upward during iteration (because of
-//     allocating new objects), the iterator does not iterate objects
-//     above the original space top. The caller must create a new
-//     iterator starting from the old top in order to visit these new
-//     objects.
-//
-// (2) If new objects are allocated below the original allocation top
-//     (e.g., free-list allocation in paged spaces), the new objects
-//     may or may not be iterated depending on their position with
-//     respect to the current point of iteration.
-//
-// (3) The space top should not change downward during iteration,
-//     otherwise the iterator will return not-necessarily-valid
-//     objects.
-
+// If objects are allocated in the page during iteration the iterator may
+// or may not iterate over those objects.  The caller must create a new
+// iterator in order to be sure to visit these new objects.
 class HeapObjectIterator: public ObjectIterator {
  public:
-  // Creates a new object iterator in a given space. If a start
-  // address is not given, the iterator starts from the space bottom.
+  // Creates a new object iterator in a given space.
   // If the size function is not given, the iterator calls the default
   // Object::Size().
   explicit HeapObjectIterator(PagedSpace* space);
   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
-  HeapObjectIterator(PagedSpace* space, Address start);
-  HeapObjectIterator(PagedSpace* space,
-                     Address start,
-                     HeapObjectCallback size_func);
   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
 
-  inline HeapObject* next() {
-    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+  // Advance to the next object, skipping free spaces and other fillers and
+  // skipping the special garbage section of which there is one per space.
+  // Returns NULL when the iteration has ended.
+  inline HeapObject* Next() {
+    do {
+      HeapObject* next_obj = FromCurrentPage();
+      if (next_obj != NULL) return next_obj;
+    } while (AdvanceToNextPage());
+    return NULL;
   }
 
-  // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() {
+    return Next();
+  }
 
  private:
-  Address cur_addr_;  // current iteration point
-  Address end_addr_;  // end iteration point
-  Address cur_limit_;  // current page limit
-  HeapObjectCallback size_func_;  // size function
-  Page* end_page_;  // caches the page of the end address
+  enum PageMode { kOnePageOnly, kAllPagesInSpace };
 
-  HeapObject* FromCurrentPage() {
-    ASSERT(cur_addr_ < cur_limit_);
+  Address cur_addr_;  // Current iteration point.
+  Address cur_end_;   // End iteration point.
+  HeapObjectCallback size_func_;  // Size function or NULL.
+  PagedSpace* space_;
+  PageMode page_mode_;
 
-    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-    ASSERT_OBJECT_SIZE(obj_size);
+  // Fast (inlined) path of Next().
+  inline HeapObject* FromCurrentPage();
 
-    cur_addr_ += obj_size;
-    ASSERT(cur_addr_ <= cur_limit_);
-
-    return obj;
-  }
-
-  // Slow path of next, goes into the next page.
-  HeapObject* FromNextPage();
+  // Slow path of Next(), goes into the next page.  Returns false if the
+  // iteration has ended.
+  bool AdvanceToNextPage();
 
   // Initializes fields.
-  void Initialize(Address start, Address end, HeapObjectCallback size_func);
-
-#ifdef DEBUG
-  // Verifies whether fields have valid values.
-  void Verify();
-#endif
+  inline void Initialize(PagedSpace* owner,
+                         Address start,
+                         Address end,
+                         PageMode mode,
+                         HeapObjectCallback size_func);
 };
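
With the address-bounded constructors gone, iteration is driven entirely by
Next() returning NULL at the end.  A minimal sketch of a caller under the new
protocol (hypothetical function, not part of this patch; assumes a fully
swept PagedSpace):

    // Count the objects in a paged space.  Next() already skips free-space
    // fillers and the per-space garbage section.
    void CountObjects(PagedSpace* space) {
      HeapObjectIterator it(space);
      int count = 0;
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        count++;
      }
      PrintF("%d objects\n", count);  // PrintF is V8's printf wrapper.
    }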
 
 
 // -----------------------------------------------------------------------------
 // A PageIterator iterates the pages in a paged space.
-//
-// The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages containing allocated objects.
-//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-//                    mark-compact collection.
-//   ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-//     returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-//     in pages returned by the iterator.  Allocation may cause the
-//     allocation pointer or MC allocation pointer in the last page to
-//     change between constructing the iterator and iterating the last
-//     page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {
-    PAGES_IN_USE,
-    PAGES_USED_BY_MC,
-    ALL_PAGES
-  };
-
-  PageIterator(PagedSpace* space, Mode mode);
+  explicit inline PageIterator(PagedSpace* space);
 
   inline bool has_next();
   inline Page* next();
@@ -889,21 +1159,25 @@
  private:
   PagedSpace* space_;
   Page* prev_page_;  // Previous page returned.
-  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  Page* next_page_;
 };
 
 
 // -----------------------------------------------------------------------------
-// A space has a list of pages. The next page can be accessed via
-// Page::next_page() call. The next page of the last page is an
-// invalid page pointer. A space can expand and shrink dynamically.
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
 
 // An abstraction of allocation and relocation pointers in a page-structured
 // space.
 class AllocationInfo {
  public:
-  Address top;  // current allocation top
-  Address limit;  // current allocation limit
+  AllocationInfo() : top(NULL), limit(NULL) {
+  }
+
+  Address top;  // Current allocation top.
+  Address limit;  // Current allocation limit.
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
@@ -935,70 +1209,210 @@
   // Zero out all the allocation statistics (ie, no capacity).
   void Clear() {
     capacity_ = 0;
-    available_ = 0;
     size_ = 0;
     waste_ = 0;
   }
 
+  void ClearSizeWaste() {
+    size_ = capacity_;
+    waste_ = 0;
+  }
+
   // Reset the allocation statistics (ie, available = capacity with no
   // wasted or allocated bytes).
   void Reset() {
-    available_ = capacity_;
     size_ = 0;
     waste_ = 0;
   }
 
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
-  intptr_t Available() { return available_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
 
-  // Grow the space by adding available bytes.
+  // Grow the space by adding available bytes.  They are initially marked as
+  // being in use (part of the size), but will normally be immediately freed,
+  // putting them on the free list and removing them from size_.
   void ExpandSpace(int size_in_bytes) {
     capacity_ += size_in_bytes;
-    available_ += size_in_bytes;
+    size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
-  // Shrink the space by removing available bytes.
+  // Shrink the space by removing available bytes.  Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
-    available_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
-    available_ -= size_in_bytes;
     size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
   void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
-    available_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Waste free bytes (available -> waste).
   void WasteBytes(int size_in_bytes) {
-    available_ -= size_in_bytes;
+    size_ -= size_in_bytes;
     waste_ += size_in_bytes;
-  }
-
-  // Consider the wasted bytes to be allocated, as they contain filler
-  // objects (waste -> size).
-  void FillWastedBytes(intptr_t size_in_bytes) {
-    waste_ -= size_in_bytes;
-    size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
  private:
   intptr_t capacity_;
-  intptr_t available_;
   intptr_t size_;
   intptr_t waste_;
 };
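
After this change capacity_ is split between size_ (bytes counted as in use,
including garbage not yet found by lazy sweeping) and whatever the owning
space's free list holds; there is no available_ counter to keep in sync on
every allocation.  A stand-alone mirror of the flow, with hypothetical
numbers rather than the real classes:

    #include <assert.h>

    // Sketch: a fresh page is first accounted entirely as in use
    // (ExpandSpace); freeing its unused tail then moves those bytes out of
    // size and onto the free list.
    int main() {
      long capacity = 0, size = 0, on_free_list = 0;

      capacity += 8192;  // ExpandSpace(8192): page counted as allocated.
      size += 8192;

      size -= 8000;      // Free(start, 8000): unused tail to the free list.
      on_free_list += 8000;

      assert(capacity == size + on_free_list);  // 8192 == 192 + 8000
      return 0;
    }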
 
 
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline FreeListNode* next();
+  inline FreeListNode** next_address();
+  inline void set_next(FreeListNode* next);
+
+  inline void Zap();
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space.  The free list is organized in such a way
+// as to encourage objects allocated around the same time to be near each
+// other.  The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
+// find a new space to allocate from.  This is done with the free list, which
+// is divided up into rough categories to cut down on waste.  Having finer
+// categories would scatter allocation more.
+
+// The old space free list is organized in categories.
+// 1-31 words:  Such small free areas are discarded for efficiency reasons.
+//     They can be reclaimed by the compactor.  However the distance between top
+//     and limit may be this small.
+// 32-255 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 1-31 words in size.  These
+//     spaces are called small.
+// 256-2047 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 32-255 words in size.  These
+//     spaces are called medium.
+// 2048-16383 words: There is a list of spaces this large.  It is used for top
+//     and limit when the object we need to allocate is 256-2047 words in size.
+//     These spaces are called large.
+// At least 16384 words.  This list is for objects of 2048 words or larger.
+//     Empty pages are added to this list.  These spaces are called huge.
+class FreeList BASE_EMBEDDED {
+ public:
+  explicit FreeList(PagedSpace* owner);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() { return available_; }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // ie, its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // size should be a non-zero multiple of the word size.
+  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+
+#ifdef DEBUG
+  void Zap();
+  static intptr_t SumFreeList(FreeListNode* node);
+  static int FreeListLength(FreeListNode* cur);
+  intptr_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
+  struct SizeStats {
+    intptr_t Total() {
+      return small_size_ + medium_size_ + large_size_ + huge_size_;
+    }
+
+    intptr_t small_size_;
+    intptr_t medium_size_;
+    intptr_t large_size_;
+    intptr_t huge_size_;
+  };
+
+  void CountFreeListItems(Page* p, SizeStats* sizes);
+
+  intptr_t EvictFreeListItems(Page* p);
+
+ private:
+  // The size range of blocks, in bytes.
+  static const int kMinBlockSize = 3 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
+
+  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
+  PagedSpace* owner_;
+  Heap* heap_;
+
+  // Total available bytes in all blocks on this free list.
+  int available_;
+
+  static const int kSmallListMin = 0x20 * kPointerSize;
+  static const int kSmallListMax = 0xff * kPointerSize;
+  static const int kMediumListMax = 0x7ff * kPointerSize;
+  static const int kLargeListMax = 0x3fff * kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kMediumAllocationMax = kSmallListMax;
+  static const int kLargeAllocationMax = kMediumListMax;
+  FreeListNode* small_list_;
+  FreeListNode* medium_list_;
+  FreeListNode* large_list_;
+  FreeListNode* huge_list_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
+};
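
The allocation-side view of these categories can be written down as a small
classifier.  A hypothetical sketch mirroring kSmallAllocationMax,
kMediumAllocationMax and kLargeAllocationMax above (the real lookup is
FreeList::FindNodeFor, which is declared but not shown here):

    enum FreeListCategory { kSmallList, kMediumList, kLargeList, kHugeList };

    // Which list serves an allocation of the given size, in words.
    FreeListCategory CategoryForAllocation(int size_in_words) {
      if (size_in_words <= 31) return kSmallList;    // 32-255 word blocks
      if (size_in_words <= 255) return kMediumList;  // 256-2047 word blocks
      if (size_in_words <= 2047) return kLargeList;  // 2048-16383 word blocks
      return kHugeList;                              // >= 16384 word blocks
    }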
+
+
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
@@ -1013,7 +1427,7 @@
   // the memory allocator's initial chunk) if possible.  If the block of
   // addresses is not big enough to contain a single page-aligned page, a
   // fresh chunk will be allocated.
-  bool Setup(Address start, size_t size);
+  bool Setup();
 
   // Returns true if the space has been successfully set up and not
   // subsequently torn down.
@@ -1026,8 +1440,6 @@
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
-  // Never crashes even if a is not a valid pointer.
-  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -1035,104 +1447,91 @@
   // linear in the number of objects in the page. It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
 
-  // Checks whether page is currently in use by this space.
-  bool IsUsed(Page* page);
-
-  void MarkAllPagesClean();
-
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
+  virtual void PrepareForMarkCompact();
 
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top()
-        : PageAllocationLimit(page);
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) = 0;
-
-  void FlushTopPageWatermark() {
-    AllocationTopPage()->SetCachedAllocationWatermark(top());
-    AllocationTopPage()->InvalidateWatermark(true);
-  }
-
-  // Current capacity without growing (Size() + Available() + Waste()).
+  // Current capacity without growing (Size() + Available()).
   intptr_t Capacity() { return accounting_stats_.Capacity(); }
 
   // Total amount of memory committed for this space.  For paged
   // spaces this equals the capacity.
   intptr_t CommittedMemory() { return Capacity(); }
 
-  // Available bytes without growing.
-  intptr_t Available() { return accounting_stats_.Available(); }
+  // Sets the size to the capacity (so the whole capacity counts as in use)
+  // and the wasted space to zero.  The stats are rebuilt during sweeping:
+  // as free spaces are discovered they are subtracted from the size and
+  // added to the available and wasted totals.
+  void ClearStats() {
+    accounting_stats_.ClearSizeWaste();
+  }
 
-  // Allocated bytes in this space.
+  // Available bytes without growing.  These are the bytes on the free list.
+  // The bytes in the linear allocation area are not included in this total
+  // because updating the stats would slow down allocation.  New pages are
+  // immediately added to the free list so they show up here.
+  intptr_t Available() { return free_list_.available(); }
+
+  // Allocated bytes in this space.  Garbage bytes that were not found due to
+  // lazy sweeping are counted as being allocated!  The bytes in the current
+  // linear allocation area (between top and limit) are also counted here.
   virtual intptr_t Size() { return accounting_stats_.Size(); }
 
-  // Wasted bytes due to fragmentation and not recoverable until the
-  // next GC of this space.
-  intptr_t Waste() { return accounting_stats_.Waste(); }
+  // As Size(), but the bytes in the current linear allocation area are not
+  // included.
+  virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
 
-  // Returns the address of the first object in this space.
-  Address bottom() { return first_page_->ObjectAreaStart(); }
+  // Wasted bytes in this space.  These are just the bytes that were thrown away
+  // due to being too small to use for allocation.  They do not include the
+  // free bytes that were not found at all due to lazy sweeping.
+  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
   // Returns the allocation pointer in this space.
-  Address top() { return allocation_info_.top; }
+  Address top() {
+    return allocation_info_.top;
+  }
+  Address limit() { return allocation_info_.limit; }
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
 
-  // Allocate the requested number of bytes for relocation during mark-compact
-  // collection.
-  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
-
   virtual bool ReserveSpace(int bytes);
 
-  // Used by ReserveSpace.
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
-
-  // Free all pages in range from prev (exclusive) to last (inclusive).
-  // Freed pages are moved to the end of page list.
-  void FreePages(Page* prev, Page* last);
-
-  // Deallocates a block.
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist) = 0;
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.
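+  // For example (hypothetical numbers): Free(start, 128) where the free
+  // list keeps 120 bytes and reports 8 bytes of internal fragmentation
+  // reduces the accounted size by 120 and returns 120.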
+  int Free(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
+    return size_in_bytes - wasted;
+  }
 
   // Set space allocation info.
-  void SetTop(Address top) {
+  void SetTop(Address top, Address limit) {
+    ASSERT(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
-    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
+    allocation_info_.limit = limit;
   }
 
-  // ---------------------------------------------------------------------------
-  // Mark-compact collection support functions
-
-  // Set the relocation point to the beginning of the space.
-  void MCResetRelocationInfo();
-
-  // Writes relocation info to the top page.
-  void MCWriteRelocationInfoToPage() {
-    TopPageOf(mc_forwarding_info_)->
-        SetAllocationWatermark(mc_forwarding_info_.top);
+  void Allocate(int bytes) {
+    accounting_stats_.AllocateBytes(bytes);
   }
 
-  // Computes the offset of a given address in this space to the beginning
-  // of the space.
-  int MCSpaceOffsetForAddress(Address addr);
+  void IncreaseCapacity(int size) {
+    accounting_stats_.ExpandSpace(size);
+  }
 
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo() = 0;
+  // Releases an unused page and shrinks the space.
+  void ReleasePage(Page* page);
 
-  // Releases half of unused pages.
-  void Shrink();
+  // Releases all of the unused pages.
+  void ReleaseAllUnusedPages();
 
-  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
-  bool EnsureCapacity(int capacity);
+  // The dummy page that anchors the linked list of pages.
+  Page* anchor() { return &anchor_; }
 
 #ifdef DEBUG
   // Print meta info and objects in this space.
@@ -1141,6 +1540,9 @@
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
 
+  // Reports statistics for the space
+  void ReportStatistics();
+
   // Overridden by subclasses to verify space-specific object
   // properties (e.g., only maps or free-list nodes are in map space).
   virtual void VerifyObject(HeapObject* obj) {}
@@ -1151,91 +1553,127 @@
   static void ResetCodeStatistics();
 #endif
 
-  // Returns the page of the allocation pointer.
-  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+  bool was_swept_conservatively() { return was_swept_conservatively_; }
+  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
 
-  void RelinkPageListInChunkOrder(bool deallocate_blocks);
+  // Evacuation candidates are swept by the evacuator.  Needs to return a valid
+  // result before _and_ after evacuation has finished.
+  static bool ShouldBeSweptLazily(Page* p) {
+    return !p->IsEvacuationCandidate() &&
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
+           !p->WasSweptPrecisely();
+  }
+
+  void SetPagesToSweep(Page* first) {
+    if (first == &anchor_) first = NULL;
+    first_unswept_page_ = first;
+  }
+
+  bool AdvanceSweeper(intptr_t bytes_to_sweep);
+
+  bool IsSweepingComplete() {
+    return !first_unswept_page_->is_valid();
+  }
+
+  Page* FirstPage() { return anchor_.next_page(); }
+  Page* LastPage() { return anchor_.prev_page(); }
+
+  // Returns zero for pages that have so little fragmentation that it is not
+  // worth defragmenting them.  Otherwise a positive integer that gives an
+  // estimate of fragmentation on an arbitrary scale.
+  int Fragmentation(Page* p) {
+    FreeList::SizeStats sizes;
+    free_list_.CountFreeListItems(p, &sizes);
+
+    intptr_t ratio;
+    intptr_t ratio_threshold;
+    if (identity() == CODE_SPACE) {
+      ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+          AreaSize();
+      ratio_threshold = 10;
+    } else {
+      ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+          AreaSize();
+      ratio_threshold = 15;
+    }
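+    // Worked example (hypothetical numbers): in a non-code space with
+    // AreaSize() == 1000000, small_size_ == 20000 and medium_size_ ==
+    // 100000 give ratio == (20000 * 5 + 100000) * 100 / 1000000 == 20,
+    // which exceeds the threshold of 15, so this returns 20 - 15 == 5.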
+
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+             reinterpret_cast<void*>(p),
+             identity(),
+             static_cast<int>(sizes.small_size_),
+             static_cast<double>(sizes.small_size_ * 100) /
+                 AreaSize(),
+             static_cast<int>(sizes.medium_size_),
+             static_cast<double>(sizes.medium_size_ * 100) /
+                 AreaSize(),
+             static_cast<int>(sizes.large_size_),
+             static_cast<double>(sizes.large_size_ * 100) /
+                 AreaSize(),
+             static_cast<int>(sizes.huge_size_),
+             static_cast<double>(sizes.huge_size_ * 100) /
+                 AreaSize(),
+             (ratio > ratio_threshold) ? "[fragmented]" : "");
+    }
+
+    if (FLAG_always_compact && sizes.Total() != AreaSize()) {
+      return 1;
+    }
+    if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+    return static_cast<int>(ratio - ratio_threshold);
+  }
+
+  void EvictEvacuationCandidatesFromFreeLists();
+
+  bool CanExpand();
+
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+
+  // Return size of allocatable area on a page in this space.
+  inline int AreaSize() {
+    return area_size_;
+  }
 
  protected:
+  int area_size_;
+
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
-  // The first page in this space.
-  Page* first_page_;
+  // The dummy page that anchors the doubly-linked list of pages.
+  Page anchor_;
 
-  // The last page in this space.  Initially set in Setup, updated in
-  // Expand and Shrink.
-  Page* last_page_;
-
-  // True if pages owned by this space are linked in chunk-order.
-  // See comment for class MemoryAllocator for definition of chunk-order.
-  bool page_list_is_chunk_ordered_;
+  // The space's free list.
+  FreeList free_list_;
 
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // Relocation information during mark-compact collections.
-  AllocationInfo mc_forwarding_info_;
-
   // Bytes of each page that cannot be allocated.  Possibly non-zero
   // for pages in spaces with only fixed-size objects.  Always zero
   // for pages in spaces with variable sized objects (those pages are
   // padded with free-list nodes).
   int page_extra_;
 
-  // Sets allocation pointer to a page bottom.
-  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
+  bool was_swept_conservatively_;
 
-  // Returns the top page specified by an allocation info structure.
-  static Page* TopPageOf(AllocationInfo alloc_info) {
-    return Page::FromAllocationTop(alloc_info.limit);
-  }
-
-  int CountPagesToTop() {
-    Page* p = Page::FromAllocationTop(allocation_info_.top);
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    int counter = 1;
-    while (it.has_next()) {
-      if (it.next() == p) return counter;
-      counter++;
-    }
-    UNREACHABLE();
-    return -1;
-  }
+  Page* first_unswept_page_;
 
   // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS. Newly allocated
-  // pages are append to the last_page;
-  bool Expand(Page* last_page);
+  // it cannot allocate the requested number of pages from the OS.
+  bool Expand();
 
-  // Generic fast case allocation function that tries linear allocation in
-  // the top page of 'alloc_info'.  Returns NULL on failure.
-  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
-                                      int size_in_bytes);
-
-  // During normal allocation or deserialization, roll to the next page in
-  // the space (there is assumed to be one) and allocate there.  This
-  // function is space-dependent.
-  virtual HeapObject* AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) = 0;
+  // Generic fast case allocation function that tries linear allocation at the
+  // address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
 
   // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
-
-  // Slow path of MCAllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
-
-#ifdef DEBUG
-  // Returns the number of total pages in this space.
-  int CountTotalPages();
-#endif
-
- private:
-  // Returns a pointer to the page of the relocation pointer.
-  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
+  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
 };
@@ -1276,20 +1714,114 @@
 };
 
 
+enum SemiSpaceId {
+  kFromSpace = 0,
+  kToSpace = 1
+};
+
+
+class SemiSpace;
+
+
+class NewSpacePage : public MemoryChunk {
+ public:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+
+  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+
+  inline NewSpacePage* next_page() const {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) {
+    set_next_chunk(page);
+  }
+
+  inline NewSpacePage* prev_page() const {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) {
+    set_prev_chunk(page);
+  }
+
+  SemiSpace* semi_space() {
+    return reinterpret_cast<SemiSpace*>(owner());
+  }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
+  static bool IsAtStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
+        == kObjectStartOffset;
+  }
+
+  static bool IsAtEnd(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
+  }
+
+  Address address() {
+    return reinterpret_cast<Address>(this);
+  }
+
+  // Finds the NewSpacePage containing the given address.
+  static inline NewSpacePage* FromAddress(Address address_in_page) {
+    Address page_start =
+        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
+                                  ~Page::kPageAlignmentMask);
+    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+    return page;
+  }
+
+  // Find the page for a limit address. A limit address is either an address
+  // inside a page, or the address right after the last byte of a page.
+  static inline NewSpacePage* FromLimit(Address address_limit) {
+    return NewSpacePage::FromAddress(address_limit - 1);
+  }
+
+ private:
+  // Create a NewSpacePage object that is only used as an anchor
+  // for the doubly-linked list of real pages.
+  explicit NewSpacePage(SemiSpace* owner) {
+    InitializeAsAnchor(owner);
+  }
+
+  static NewSpacePage* Initialize(Heap* heap,
+                                  Address start,
+                                  SemiSpace* semi_space);
+
+  // Initialize a fake NewSpacePage used as a sentinel at the ends
+  // of a doubly-linked list of real NewSpacePages.
+  // Only uses the prev/next links, and sets flags to not be in new-space.
+  void InitializeAsAnchor(SemiSpace* owner);
+
+  friend class SemiSpace;
+  friend class SemiSpaceIterator;
+};
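
Both lookups are pure mask arithmetic on page-aligned chunks.  A minimal
stand-alone sketch of the same computation, assuming a hypothetical 1 MB
page alignment rather than the real Page constants:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uintptr_t kPageAlignmentMask = (1 << 20) - 1;  // assumed 1 MB

      // FromAddress: clear the low bits of an interior address.
      uintptr_t interior = 0x12345678;
      uintptr_t page_start = interior & ~kPageAlignmentMask;
      assert(page_start == 0x12300000);

      // FromLimit: back up one byte first, so a one-past-the-end limit
      // maps to the page it terminates rather than the next one.
      uintptr_t limit = page_start + (1 << 20);
      assert(((limit - 1) & ~kPageAlignmentMask) == page_start);
      return 0;
    }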
+
+
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
-// A semispace is a contiguous chunk of memory. The mark-compact collector
-// uses the memory in the from space as a marking stack when tracing live
-// objects.
+// A semispace is a contiguous chunk of memory holding page-like memory
+// chunks. The mark-compact collector uses the memory of the first page in
+// the from space as a marking stack when tracing live objects.
 
 class SemiSpace : public Space {
  public:
   // Constructor.
-  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
-    start_ = NULL;
-    age_mark_ = NULL;
-  }
+  SemiSpace(Heap* heap, SemiSpaceId semispace)
+    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+      start_(NULL),
+      age_mark_(NULL),
+      id_(semispace),
+      anchor_(this),
+      current_page_(NULL) { }
 
   // Sets up the semispace using the given chunk.
   bool Setup(Address start, int initial_capacity, int maximum_capacity);
@@ -1301,14 +1833,9 @@
   // True if the space has been set up but not torn down.
   bool HasBeenSetup() { return start_ != NULL; }
 
-  // Grow the size of the semispace by committing extra virtual memory.
-  // Assumes that the caller has checked that the semispace has not reached
-  // its maximum capacity (and thus there is space available in the reserved
-  // address range to grow).
-  bool Grow();
-
   // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity.
+  // requested must be larger than the current capacity and less than
+  // the maximum capacity.
   bool GrowTo(int new_capacity);
 
   // Shrinks the semispace to the new capacity.  The new capacity
@@ -1316,14 +1843,40 @@
   // semispace and less than the current capacity.
   bool ShrinkTo(int new_capacity);
 
-  // Returns the start address of the space.
-  Address low() { return start_; }
+  // Returns the start address of the first page of the space.
+  Address space_start() {
+    ASSERT(anchor_.next_page() != &anchor_);
+    return anchor_.next_page()->area_start();
+  }
+
+  // Returns the start address of the current page of the space.
+  Address page_low() {
+    return current_page_->area_start();
+  }
+
   // Returns one past the end address of the space.
-  Address high() { return low() + capacity_; }
+  Address space_end() {
+    return anchor_.prev_page()->area_end();
+  }
+
+  // Returns one past the end address of the current page of the space.
+  Address page_high() {
+    return current_page_->area_end();
+  }
+
+  bool AdvancePage() {
+    NewSpacePage* next_page = current_page_->next_page();
+    if (next_page == anchor()) return false;
+    current_page_ = next_page;
+    return true;
+  }
+
+  // Resets the space to using the first page.
+  void Reset();
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
-  void set_age_mark(Address mark) { age_mark_ = mark; }
+  void set_age_mark(Address mark);
 
   // True if the address is in the address range of this semispace (not
   // necessarily below the allocation pointer).
@@ -1338,11 +1891,6 @@
     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   }
 
-  // The offset of an address from the beginning of the space.
-  int SpaceOffsetForAddress(Address addr) {
-    return static_cast<int>(addr - low());
-  }
-
   // If we don't have these here then SemiSpace will be abstract.  However
   // they should never be called.
   virtual intptr_t Size() {
@@ -1359,9 +1907,19 @@
   bool Commit();
   bool Uncommit();
 
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
+
 #ifdef DEBUG
   virtual void Print();
   virtual void Verify();
+  // Validate a range of addresses in a SemiSpace.
+  // The "from" address must be on a page prior to the "to" address,
+  // in the linked page order, or it must be earlier on the same page.
+  static void AssertValidRange(Address from, Address to);
+#else
+  // Do nothing.
+  inline static void AssertValidRange(Address from, Address to) {}
 #endif
 
   // Returns the current capacity of the semi space.
@@ -1373,7 +1931,17 @@
   // Returns the initial capacity of the semi space.
   int InitialCapacity() { return initial_capacity_; }
 
+  SemiSpaceId id() { return id_; }
+
+  static void Swap(SemiSpace* from, SemiSpace* to);
+
  private:
+  // Flips the semispace between being from-space and to-space.
+  // Copies the flags into the masked positions on all pages in the space.
+  void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+  NewSpacePage* anchor() { return &anchor_; }
+
   // The current and maximum capacity of the space.
   int capacity_;
   int maximum_capacity_;
@@ -1390,7 +1958,13 @@
   uintptr_t object_expected_;
 
   bool committed_;
+  SemiSpaceId id_;
 
+  NewSpacePage anchor_;
+  NewSpacePage* current_page_;
+
+  friend class SemiSpaceIterator;
+  friend class NewSpacePageIterator;
  public:
   TRACK_MEMORY("SemiSpace")
 };
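
Because the page list is circular and anchored by a sentinel, a caller knows
it has drained a semispace when AdvancePage() comes back around to the
anchor.  A hypothetical sketch of walking every page in list order (assumes
the space has been set up; Memory::Address_at and kZapValue are existing V8
helpers):

    void ZapSemiSpace(SemiSpace* space) {
      space->Reset();  // Start from the first page again.
      do {
        for (Address a = space->page_low();
             a < space->page_high();
             a += kPointerSize) {
          Memory::Address_at(a) = kZapValue;
        }
      } while (space->AdvancePage());  // False once the anchor is reached.
    }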
@@ -1406,12 +1980,26 @@
   // Create an iterator over the objects in the given space.  If no start
   // address is given, the iterator starts from the bottom of the space.  If
   // no size function is given, the iterator calls Object::Size().
-  explicit SemiSpaceIterator(NewSpace* space);
-  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
-  SemiSpaceIterator(NewSpace* space, Address start);
 
-  HeapObject* next() {
+  // Iterate over all of allocated to-space.
+  explicit SemiSpaceIterator(NewSpace* space);
+  // Iterate over all of allocated to-space, with a custom size function.
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  // Iterate over part of allocated to-space, from start to the end
+  // of allocation.
+  SemiSpaceIterator(NewSpace* space, Address start);
+  // Iterate from one address to another in the same semi-space.
+  SemiSpaceIterator(Address from, Address to);
+
+  HeapObject* Next() {
     if (current_ == limit_) return NULL;
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      ASSERT(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return NULL;
+    }
 
     HeapObject* object = HeapObject::FromAddress(current_);
     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1421,14 +2009,13 @@
   }
 
   // Implementation of the ObjectIterator functions.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
-  void Initialize(NewSpace* space, Address start, Address end,
+  void Initialize(Address start,
+                  Address end,
                   HeapObjectCallback size_func);
 
-  // The semispace.
-  SemiSpace* space_;
   // The current iteration point.
   Address current_;
   // The end of iteration.
@@ -1439,6 +2026,34 @@
 
 
 // -----------------------------------------------------------------------------
+// A NewSpacePageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+  // Make an iterator that runs over all pages in to-space.
+  explicit inline NewSpacePageIterator(NewSpace* space);
+
+  // Make an iterator that runs over all pages in the given semispace,
+  // even those not used in allocation.
+  explicit inline NewSpacePageIterator(SemiSpace* space);
+
+  // Make an iterator that iterates from the page containing start
+  // to the page that contains limit in the same semispace.
+  inline NewSpacePageIterator(Address start, Address limit);
+
+  inline bool has_next();
+  inline NewSpacePage* next();
+
+ private:
+  NewSpacePage* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  NewSpacePage* next_page_;
+  // Last page that will be returned by the iterator.
+  NewSpacePage* last_page_;
+};
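
A typical use, sketched for illustration (hypothetical caller, not part of
this patch):

    // Clear a GC flag on every page currently used for allocation in
    // to-space.  ClearFlag is assumed to be the counterpart of the
    // MemoryChunk flag setter.
    void ClearScanOnScavenge(NewSpace* space) {
      NewSpacePageIterator it(space);
      while (it.has_next()) {
        it.next()->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
      }
    }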
+
+
+// -----------------------------------------------------------------------------
 // The young generation space.
 //
 // The new space consists of a contiguous pair of semispaces.  It simply
@@ -1449,11 +2064,13 @@
   // Constructor.
   explicit NewSpace(Heap* heap)
     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      to_space_(heap),
-      from_space_(heap) {}
+      to_space_(heap, kToSpace),
+      from_space_(heap, kFromSpace),
+      reservation_(),
+      inline_allocation_limit_step_(0) {}
 
   // Sets up the new space using the given chunk.
-  bool Setup(Address start, int size);
+  bool Setup(int reserved_semispace_size_, int max_semispace_size);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -1480,18 +2097,30 @@
     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
         == reinterpret_cast<uintptr_t>(start_);
   }
+
   bool Contains(Object* o) {
-    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+    Address a = reinterpret_cast<Address>(o);
+    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
   }
 
   // Return the allocated bytes in the active semispace.
-  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+  virtual intptr_t Size() {
+    return pages_used_ * NewSpacePage::kAreaSize +
+        static_cast<int>(top() - to_space_.page_low());
+  }
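+  // For example (hypothetical numbers): with two completely filled pages
+  // (pages_used_ == 2) and 4 KB used on the current page, Size() returns
+  // 2 * NewSpacePage::kAreaSize + 4096.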
+
   // The same, but returning an int.  We have to have the one that returns
   // intptr_t because it is inherited, but if we know we are dealing with the
   // new space, which can't get as big as the other spaces, then this is useful:
   int SizeAsInt() { return static_cast<int>(Size()); }
 
   // Return the allocatable capacity of a semispace, excluding per-page overhead.
+  intptr_t EffectiveCapacity() {
+    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
+    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
+  }
+
+  // Return the current capacity of a semispace.
   intptr_t Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
@@ -1503,8 +2132,10 @@
     return Capacity();
   }
 
-  // Return the available bytes without growing in the active semispace.
-  intptr_t Available() { return Capacity() - Size(); }
+  // Return the available bytes without growing.
+  intptr_t Available() {
+    return Capacity() - Size();
+  }
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
@@ -1519,9 +2150,12 @@
   }
 
   // Return the address of the allocation pointer in the active semispace.
-  Address top() { return allocation_info_.top; }
+  Address top() {
+    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
+    return allocation_info_.top;
+  }
   // Return the address of the first object in the active semispace.
-  Address bottom() { return to_space_.low(); }
+  Address bottom() { return to_space_.space_start(); }
 
   // Get the age mark of the inactive semispace.
   Address age_mark() { return from_space_.age_mark(); }
@@ -1533,54 +2167,68 @@
   Address start() { return start_; }
   uintptr_t mask() { return address_mask_; }
 
+  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
+    ASSERT(Contains(addr));
+    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
+           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
+    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+  }
+
+  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
+    return reinterpret_cast<Address>(index << kPointerSizeLog2);
+  }
+
   // The allocation top and limit addresses.
   Address* allocation_top_address() { return &allocation_info_.top; }
   Address* allocation_limit_address() { return &allocation_info_.limit; }
 
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
-    return AllocateRawInternal(size_in_bytes, &allocation_info_);
-  }
-
-  // Allocate the requested number of bytes for relocation during mark-compact
-  // collection.
-  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
-    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
-  }
+  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
-  // Reset the reloction pointer to the bottom of the inactive semispace in
-  // preparation for mark-compact collection.
-  void MCResetRelocationInfo();
-  // Update the allocation pointer in the active semispace after a
-  // mark-compact collection.
-  void MCCommitRelocationInfo();
 
-  // Get the extent of the inactive semispace (for use as a marking stack).
-  Address FromSpaceLow() { return from_space_.low(); }
-  Address FromSpaceHigh() { return from_space_.high(); }
-
-  // Get the extent of the active semispace (to sweep newly copied objects
-  // during a scavenge collection).
-  Address ToSpaceLow() { return to_space_.low(); }
-  Address ToSpaceHigh() { return to_space_.high(); }
-
-  // Offsets from the beginning of the semispaces.
-  int ToSpaceOffsetForAddress(Address a) {
-    return to_space_.SpaceOffsetForAddress(a);
+  void LowerInlineAllocationLimit(intptr_t step) {
+    inline_allocation_limit_step_ = step;
+    if (step == 0) {
+      allocation_info_.limit = to_space_.page_high();
+    } else {
+      allocation_info_.limit = Min(
+          allocation_info_.top + inline_allocation_limit_step_,
+          allocation_info_.limit);
+    }
+    top_on_previous_step_ = allocation_info_.top;
   }
-  int FromSpaceOffsetForAddress(Address a) {
-    return from_space_.SpaceOffsetForAddress(a);
+
+  // Get the extent of the inactive semispace (for use as a marking stack,
+  // or to zap it). Notice: space-addresses are not necessarily on the
+  // same page, so FromSpaceStart() might be above FromSpaceEnd().
+  Address FromSpacePageLow() { return from_space_.page_low(); }
+  Address FromSpacePageHigh() { return from_space_.page_high(); }
+  Address FromSpaceStart() { return from_space_.space_start(); }
+  Address FromSpaceEnd() { return from_space_.space_end(); }
+
+  // Get the extent of the active semispace's pages' memory.
+  Address ToSpaceStart() { return to_space_.space_start(); }
+  Address ToSpaceEnd() { return to_space_.space_end(); }
+
+  inline bool ToSpaceContains(Address address) {
+    return to_space_.Contains(address);
+  }
+  inline bool FromSpaceContains(Address address) {
+    return from_space_.Contains(address);
   }
 
   // True if the object is a heap object in the address range of the
   // respective semispace (not necessarily below the allocation pointer of the
   // semispace).
-  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
-  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+  // Try to switch the active semispace to a new, empty page.
+  // Returns false if this isn't possible or reasonable (i.e., there
+  // are no pages, or the current page is already empty), or true
+  // if successful.
+  bool AddFreshPage();
 
   virtual bool ReserveSpace(int bytes);
 
@@ -1620,10 +2268,24 @@
     return from_space_.Uncommit();
   }
 
+  inline intptr_t inline_allocation_limit_step() {
+    return inline_allocation_limit_step_;
+  }
+
+  SemiSpace* active_space() { return &to_space_; }
+
  private:
+  // Update allocation info to match the current to-space page.
+  void UpdateAllocationInfo();
+
+  Address chunk_base_;
+  uintptr_t chunk_size_;
+
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
+  VirtualMemory reservation_;
+  int pages_used_;
 
   // Start address and bit mask for containment testing.
   Address start_;
@@ -1634,15 +2296,19 @@
   // Allocation pointer and limit for normal allocation.
   AllocationInfo allocation_info_;
-  AllocationInfo mc_forwarding_info_;
+
+  // When incremental marking is active we will set allocation_info_.limit
+  // to be lower than the actual limit and then will gradually increase it
+  // in steps to guarantee that we do incremental marking steps even
+  // when all allocation is performed from inlined generated code.
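+  // For example (hypothetical step): with a 1 KB step the limit trails at
+  // most 1 KB above the top, so inlined allocation falls back to the slow
+  // path at least once per 1 KB allocated, giving incremental marking a
+  // chance to do a step.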
+  intptr_t inline_allocation_limit_step_;
+
+  Address top_on_previous_step_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  // Implementation of AllocateRaw and MCAllocateRaw.
-  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
-      int size_in_bytes,
-      AllocationInfo* alloc_info);
+  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class SemiSpaceIterator;
 
@@ -1652,193 +2318,6 @@
 
 
 // -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it does not check nor require that the first word at the address is a map
-  // pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline Address next(Heap* heap);
-  inline void set_next(Heap* heap, Address next);
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space.
-class OldSpaceFreeList BASE_EMBEDDED {
- public:
-  OldSpaceFreeList(Heap* heap, AllocationSpace owner);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block of size 'size_in_bytes'
-  // starting at 'start' is placed on the free list.  The return value is the
-  // number of bytes that have been lost due to internal fragmentation by
-  // freeing the block.  Bookkeeping information will be written to the block,
-  // ie, its contents will be destroyed.  The start address should be word
-  // aligned, and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
-
-  // Allocate a block of size 'size_in_bytes' from the free list.  The block
-  // is unitialized.  A failure is returned if no block is available.  The
-  // number of bytes lost to fragmentation is returned in the output parameter
-  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
-  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
-
-  void MarkNodes();
-
- private:
-  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
-  // will always result in waste.)
-  static const int kMinBlockSize = 2 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
-  Heap* heap_;
-
-  // The identity of the owning space, for building allocation Failure
-  // objects.
-  AllocationSpace owner_;
-
-  // Total available bytes in all blocks on this free list.
-  int available_;
-
-  // Blocks are put on exact free lists in an array, indexed by size in words.
-  // The available sizes are kept in an increasingly ordered list. Entries
-  // corresponding to sizes < kMinBlockSize always have an empty free list
-  // (but index kHead is used for the head of the size list).
-  struct SizeNode {
-    // Address of the head FreeListNode of the implied block size or NULL.
-    Address head_node_;
-    // Size (words) of the next larger available size if head_node_ != NULL.
-    int next_size_;
-  };
-  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
-  SizeNode free_[kFreeListsLength];
-
-  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
-  static const int kHead = kMinBlockSize / kPointerSize - 1;
-  static const int kEnd = kMaxInt;
-
-  // We keep a "finger" in the size list to speed up a common pattern:
-  // repeated requests for the same or increasing sizes.
-  int finger_;
-
-  // Starting from *prev, find and return the smallest size >= index (words),
-  // or kEnd. Update *prev to be the largest size < index, or kHead.
-  int FindSize(int index, int* prev) {
-    int cur = free_[*prev].next_size_;
-    while (cur < index) {
-      *prev = cur;
-      cur = free_[cur].next_size_;
-    }
-    return cur;
-  }
-
-  // Remove an existing element from the size list.
-  void RemoveSize(int index) {
-    int prev = kHead;
-    int cur = FindSize(index, &prev);
-    ASSERT(cur == index);
-    free_[prev].next_size_ = free_[cur].next_size_;
-    finger_ = prev;
-  }
-
-  // Insert a new element into the size list.
-  void InsertSize(int index) {
-    int prev = kHead;
-    int cur = FindSize(index, &prev);
-    ASSERT(cur != index);
-    free_[prev].next_size_ = index;
-    free_[index].next_size_ = cur;
-  }
-
-  // The size list is not updated during a sequence of calls to Free, but is
-  // rebuilt before the next allocation.
-  void RebuildSizeList();
-  bool needs_rebuild_;
-
-#ifdef DEBUG
-  // Does this free list contain a free block located at the address of 'node'?
-  bool Contains(FreeListNode* node);
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
-};
-
-
-// The free list for the map space.
-class FixedSizeFreeList BASE_EMBEDDED {
- public:
-  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block starting at 'start' (assumed to
-  // have size object_size_) is placed on the free list.  Bookkeeping
-  // information will be written to the block, ie, its contents will be
-  // destroyed.  The start address should be word aligned.
-  void Free(Address start);
-
-  // Allocate a fixed sized block from the free list.  The block is unitialized.
-  // A failure is returned if no block is available.
-  MUST_USE_RESULT MaybeObject* Allocate();
-
-  void MarkNodes();
-
- private:
-  Heap* heap_;
-
-  // Available bytes on the free list.
-  intptr_t available_;
-
-  // The head of the free list.
-  Address head_;
-
-  // The tail of the free list.
-  Address tail_;
-
-  // The identity of the owning space, for building allocation Failure
-  // objects.
-  AllocationSpace owner_;
-
-  // The size of the objects in this space.
-  int object_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
-};
-
-
-// -----------------------------------------------------------------------------
 // Old object space (excluding map objects)
 
 class OldSpace : public PagedSpace {
@@ -1849,71 +2328,28 @@
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable),
-        free_list_(heap, id) {
+      : PagedSpace(heap, max_capacity, id, executable) {
     page_extra_ = 0;
   }
 
-  // The bytes available on the free list (ie, not above the linear allocation
-  // pointer).
-  intptr_t AvailableFree() { return free_list_.available(); }
-
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
+    return page->area_end();
   }
 
-  // Give a block of memory to the space's free list.  It might be added to
-  // the free list or accounted as waste.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
-    accounting_stats_.DeallocateBytes(size_in_bytes);
-
-    if (add_to_freelist) {
-      int wasted_bytes = free_list_.Free(start, size_in_bytes);
-      accounting_stats_.WasteBytes(wasted_bytes);
-    }
-  }
-
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist);
-
-  // Prepare for full garbage collection.  Resets the relocation pointer and
-  // clears the free list.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo();
-
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
-  void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
-  // Reports statistics for the space
-  void ReportStatistics();
-#endif
-
- protected:
-  // Virtual function in the superclass.  Slow path of AllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  // Virtual function in the superclass.  Allocate linearly at the start of
-  // the page after current_page (there is assumed to be one).
-  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
- private:
-  // The space's free list.
-  OldSpaceFreeList free_list_;
-
  public:
   TRACK_MEMORY("OldSpace")
 };
 
 
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+  SLOW_ASSERT((space).page_low() <= (info).top             \
+              && (info).top <= (space).page_high()         \
+              && (info).limit <= (space).page_high())
+
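// [Editor's sketch, not part of the diff] The invariant that
// ASSERT_SEMISPACE_ALLOCATION_INFO encodes, restated with stand-in types
// for AllocationInfo and the semispace page bounds.
#include <cstdint>

struct AllocInfoSketch { uintptr_t top, limit; };
struct PageBoundsSketch { uintptr_t low, high; };

// top must lie inside the current page and limit must not pass its end.
inline bool SemiSpaceInfoOk(const AllocInfoSketch& info,
                            const PageBoundsSketch& page) {
  return page.low <= info.top &&
         info.top <= page.high &&
         info.limit <= page.high;
}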
+
 // -----------------------------------------------------------------------------
 // Old space for objects of a fixed size
 
@@ -1926,56 +2362,21 @@
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name),
-        free_list_(heap, id, object_size_in_bytes) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+        name_(name) {
+    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   }
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
+    return page->area_end() - page_extra_;
   }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
-  // Give a fixed sized block of memory to the space's free list.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  void Free(Address start, bool add_to_freelist) {
-    if (add_to_freelist) {
-      free_list_.Free(start);
-    }
-    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
-  }
-
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo();
-
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist);
-
-  void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
-  // Reports statistic info of the space
-  void ReportStatistics();
-#endif
+  virtual void PrepareForMarkCompact();
 
  protected:
-  // Virtual function in the superclass.  Slow path of AllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  // Virtual function in the superclass.  Allocate linearly at the start of
-  // the page after current_page (there is assumed to be one).
-  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
   void ResetFreeList() {
     free_list_.Reset();
   }
@@ -1986,9 +2387,6 @@
 
   // The name of this space.
   const char* name_;
-
-  // The space's free list.
-  FixedSizeFreeList free_list_;
 };
 
 
@@ -2004,83 +2402,18 @@
            AllocationSpace id)
       : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
         max_map_space_pages_(max_map_space_pages) {
-    ASSERT(max_map_space_pages < kMaxMapPageIndex);
   }
 
-  // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
   // Given an index, returns the page address.
-  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+  // TODO(1600): this limit is artificial just to keep the code compilable
+  static const int kMaxMapPageIndex = 1 << 16;
 
-  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
-
-  // Are map pointers encodable into map word?
-  bool MapPointersEncodable() {
-    if (!FLAG_use_big_map_space) {
-      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
-      return true;
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (IsPowerOf2(Map::kSize)) {
+      return RoundDown(size, Map::kSize);
+    } else {
+      return (size / Map::kSize) * Map::kSize;
     }
-    return CountPagesToTop() <= max_map_space_pages_;
-  }
-
-  // Should be called after forced sweep to find out if map space needs
-  // compaction.
-  bool NeedsCompaction(int live_maps) {
-    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
-  }
-
-  Address TopAfterCompaction(int live_maps) {
-    ASSERT(NeedsCompaction(live_maps));
-
-    int pages_left = live_maps / kMapsPerPage;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (pages_left-- > 0) {
-      ASSERT(it.has_next());
-      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    }
-    ASSERT(it.has_next());
-    Page* top_page = it.next();
-    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    ASSERT(top_page->is_valid());
-
-    int offset = live_maps % kMapsPerPage * Map::kSize;
-    Address top = top_page->ObjectAreaStart() + offset;
-    ASSERT(top < top_page->ObjectAreaEnd());
-    ASSERT(Contains(top));
-
-    return top;
-  }
-
-  void FinishCompaction(Address new_top, int live_maps) {
-    Page* top_page = Page::FromAddress(new_top);
-    ASSERT(top_page->is_valid());
-
-    SetAllocationInfo(&allocation_info_, top_page);
-    allocation_info_.top = new_top;
-
-    int new_size = live_maps * Map::kSize;
-    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
-    accounting_stats_.AllocateBytes(new_size);
-
-    // Flush allocation watermarks.
-    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
-      p->SetAllocationWatermark(p->AllocationTop());
-    }
-    top_page->SetAllocationWatermark(new_top);
-
-#ifdef DEBUG
-    if (FLAG_enable_slow_asserts) {
-      intptr_t actual_size = 0;
-      for (Page* p = first_page_; p != top_page; p = p->next_page())
-        actual_size += kMapsPerPage * Map::kSize;
-      actual_size += (new_top - top_page->ObjectAreaStart());
-      ASSERT(accounting_stats_.Size() == actual_size);
-    }
-#endif
-
-    Shrink();
-    ResetFreeList();
   }
 
  protected:
@@ -2089,7 +2422,7 @@
 #endif
 
  private:
-  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
@@ -2098,9 +2431,6 @@
 
   const int max_map_space_pages_;
 
-  // An array of page start address in a map space.
-  Address page_addresses_[kMaxMapPageIndex];
-
  public:
   TRACK_MEMORY("MapSpace")
 };
@@ -2116,6 +2446,14 @@
       : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
   {}
 
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
+      return RoundDown(size, JSGlobalPropertyCell::kSize);
+    } else {
+      return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+    }
+  }
+
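// [Editor's sketch, not part of the diff] What RoundSizeDownToObjectAlignment
// computes: the largest multiple of the object size that fits in `size`.
// RoundDown only works for power-of-two units, hence the division fallback;
// the unit is a compile-time constant, so the branch folds away.
inline int RoundDownToUnit(int size, int unit) {
  if ((unit & (unit - 1)) == 0) return size & ~(unit - 1);  // power of two
  return (size / unit) * unit;  // general case: truncating division
}
// E.g., for a hypothetical 88-byte object size:
// RoundDownToUnit(1000, 88) == 968 == 11 * 88.
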
  protected:
 #ifdef DEBUG
   virtual void VerifyObject(HeapObject* obj);
@@ -2133,67 +2471,9 @@
 // A large object always starts at Page::kObjectStartOffset to a page.
 // Large objects do not move during garbage collections.
 
-// A LargeObjectChunk holds exactly one large object page with exactly one
-// large object.
-class LargeObjectChunk {
- public:
-  // Allocates a new LargeObjectChunk that contains a large object page
-  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
-  // object) bytes after the object area start of that page.
-  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
-
-  // Free the memory associated with the chunk.
-  void Free(Executability executable);
-
-  // Interpret a raw address as a large object chunk.
-  static LargeObjectChunk* FromAddress(Address address) {
-    return reinterpret_cast<LargeObjectChunk*>(address);
-  }
-
-  // Returns the address of this chunk.
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  Page* GetPage() {
-    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  }
-
-  // Accessors for the fields of the chunk.
-  LargeObjectChunk* next() { return next_; }
-  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
-  size_t size() { return size_ & ~Page::kPageFlagMask; }
-
-  // Compute the start address in the chunk.
-  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
-
-  // Returns the object in this chunk.
-  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
-
-  // Given a requested size returns the physical size of a chunk to be
-  // allocated.
-  static int ChunkSizeFor(int size_in_bytes);
-
-  // Given a chunk size, returns the object size it can accommodate.  Used by
-  // LargeObjectSpace::Available.
-  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
-    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
-    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
-  }
-
- private:
-  // A pointer to the next large object chunk in the space or NULL.
-  LargeObjectChunk* next_;
-
-  // The total size of this chunk.
-  size_t size_;
-
- public:
-  TRACK_MEMORY("LargeObjectChunk")
-};
-
-
 class LargeObjectSpace : public Space {
  public:
-  LargeObjectSpace(Heap* heap, AllocationSpace id);
+  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
   virtual ~LargeObjectSpace() {}
 
   // Initializes internal data structures.
@@ -2202,12 +2482,15 @@
   // Releases internal resources, frees objects in this space.
   void TearDown();
 
-  // Allocates a (non-FixedArray, non-Code) large object.
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
-  // Allocates a large Code object.
-  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
-  // Allocates a large FixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
+  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+  }
+
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
+  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
+                                           Executability executable);
 
   // Available bytes for objects in this space.
   inline intptr_t Available();
@@ -2231,10 +2514,7 @@
 
   // Finds a large object page containing the given pc, returns NULL
   // if such a page doesn't exist.
-  LargeObjectChunk* FindChunkContainingPc(Address pc);
-
-  // Iterates objects covered by dirty regions.
-  void IterateDirtyRegions(ObjectSlotCallback func);
+  LargePage* FindPageContainingPc(Address pc);
 
   // Frees unmarked objects.
   void FreeUnmarkedObjects();
@@ -2243,13 +2523,15 @@
   bool Contains(HeapObject* obj);
 
   // Checks whether the space is empty.
-  bool IsEmpty() { return first_chunk_ == NULL; }
+  bool IsEmpty() { return first_page_ == NULL; }
 
   // See the comments for ReserveSpace in the Space class.  This has to be
   // called after ReserveSpace has been called on the paged spaces, since they
   // may use some memory, leaving less for large objects.
   virtual bool ReserveSpace(int bytes);
 
+  LargePage* first_page() { return first_page_; }
+
 #ifdef DEBUG
   virtual void Verify();
   virtual void Print();
@@ -2261,18 +2543,13 @@
   bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
 
  private:
+  intptr_t max_capacity_;
   // The head of the linked list of large object chunks.
-  LargeObjectChunk* first_chunk_;
+  LargePage* first_page_;
   intptr_t size_;  // allocated bytes
   int page_count_;  // number of chunks
   intptr_t objects_size_;  // size of objects
 
-  // Shared implementation of AllocateRaw, AllocateRawCode and
-  // AllocateRawFixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
-                                                   int object_size,
-                                                   Executability executable);
-
   friend class LargeObjectIterator;
 
  public:
@@ -2285,17 +2562,78 @@
   explicit LargeObjectIterator(LargeObjectSpace* space);
   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
 
-  HeapObject* next();
+  HeapObject* Next();
 
   // Implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
-  LargeObjectChunk* current_;
+  LargePage* current_;
   HeapObjectCallback size_func_;
 };
 
 
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space.
+class PointerChunkIterator BASE_EMBEDDED {
+ public:
+  inline explicit PointerChunkIterator(Heap* heap);
+
+  // Return NULL when the iterator is done.
+  MemoryChunk* next() {
+    switch (state_) {
+      case kOldPointerState: {
+        if (old_pointer_iterator_.has_next()) {
+          return old_pointer_iterator_.next();
+        }
+        state_ = kMapState;
+        // Fall through.
+      }
+      case kMapState: {
+        if (map_iterator_.has_next()) {
+          return map_iterator_.next();
+        }
+        state_ = kLargeObjectState;
+        // Fall through.
+      }
+      case kLargeObjectState: {
+        HeapObject* heap_object;
+        do {
+          heap_object = lo_iterator_.Next();
+          if (heap_object == NULL) {
+            state_ = kFinishedState;
+            return NULL;
+          }
+          // Fixed arrays are the only pointer-containing objects in large
+          // object space.
+        } while (!heap_object->IsFixedArray());
+        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+        return answer;
+      }
+      case kFinishedState:
+        return NULL;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+
+ private:
+  enum State {
+    kOldPointerState,
+    kMapState,
+    kLargeObjectState,
+    kFinishedState
+  };
+  State state_;
+  PageIterator old_pointer_iterator_;
+  PageIterator map_iterator_;
+  LargeObjectIterator lo_iterator_;
+};
+
+
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
diff --git a/src/splay-tree-inl.h b/src/splay-tree-inl.h
index 9c2287e..4640ed5 100644
--- a/src/splay-tree-inl.h
+++ b/src/splay-tree-inl.h
@@ -45,7 +45,7 @@
 bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
   if (is_empty()) {
     // If the tree is empty, insert the new node.
-    root_ = new Node(key, Config::kNoValue);
+    root_ = new Node(key, Config::NoValue());
   } else {
     // Splay on the key to move the last node on the search path
     // for the key to the root of the tree.
@@ -57,7 +57,7 @@
       return false;
     }
     // Insert the new node.
-    Node* node = new Node(key, Config::kNoValue);
+    Node* node = new Node(key, Config::NoValue());
     InsertInternal(cmp, node);
   }
   locator->bind(root_);
@@ -226,7 +226,7 @@
 void SplayTree<Config, Allocator>::Splay(const Key& key) {
   if (is_empty())
     return;
-  Node dummy_node(Config::kNoKey, Config::kNoValue);
+  Node dummy_node(Config::kNoKey, Config::NoValue());
   // Create a dummy node.  The use of the dummy node is a bit
   // counter-intuitive: The right child of the dummy node will hold
   // the L tree of the algorithm.  The left child of the dummy node
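
// [Editor's note, not part of the diff] Replacing the static constant
// Config::kNoValue with a Config::NoValue() call is consistent with the
// "Removed exit-time destructors" ChangeLog entry: when the sentinel is an
// object, a static data member needs static construction and an exit-time
// destructor, while a function returning it by value needs neither.
// A sketch of the pattern:
struct ValueSketch {};
struct ConfigSketch {
  // Before (hypothetical): static const ValueSketch kNoValue;
  static ValueSketch NoValue() { return ValueSketch(); }  // no static storage
};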
diff --git a/src/store-buffer-inl.h b/src/store-buffer-inl.h
new file mode 100644
index 0000000..dd65cbc
--- /dev/null
+++ b/src/store-buffer-inl.h
@@ -0,0 +1,79 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_INL_H_
+#define V8_STORE_BUFFER_INL_H_
+
+#include "store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+Address StoreBuffer::TopAddress() {
+  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
+}
+
+
+void StoreBuffer::Mark(Address addr) {
+  ASSERT(!heap_->cell_space()->Contains(addr));
+  ASSERT(!heap_->code_space()->Contains(addr));
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  *top++ = addr;
+  heap_->public_set_store_buffer_top(top);
+  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
+    ASSERT(top == limit_);
+    Compact();
+  } else {
+    ASSERT(top < limit_);
+  }
+}
+
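// [Editor's sketch, not part of the diff] Why the single AND in Mark()
// detects a full buffer: the new buffer is kStoreBufferSize bytes, aligned
// to 2 * kStoreBufferSize, so every interior top value has the
// kStoreBufferOverflowBit clear and only the one-past-the-end limit has it
// set.  Checked here with the 64-bit constants (assuming kPointerSizeLog2
// is 3, i.e. the bit is 1 << 17).
#include <cassert>
#include <cstdint>

inline void CheckOverflowBitTrick() {
  const uintptr_t kSize = uintptr_t(1) << 17;  // kStoreBufferSize
  const uintptr_t kBit = kSize;                // kStoreBufferOverflowBit
  const uintptr_t start = 4 * kSize;           // any 2 * kSize aligned base
  const uintptr_t limit = start + kSize;
  for (uintptr_t top = start; top < limit; top += sizeof(void*)) {
    assert((top & kBit) == 0);                 // interior: bit clear
  }
  assert((limit & kBit) != 0);                 // full: bit set
}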
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+  if (store_buffer_rebuilding_enabled_) {
+    SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
+                !heap_->code_space()->Contains(addr) &&
+                !heap_->old_data_space()->Contains(addr) &&
+                !heap_->new_space()->Contains(addr));
+    Address* top = old_top_;
+    *top++ = addr;
+    old_top_ = top;
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    if (top >= old_limit_) {
+      ASSERT(callback_ != NULL);
+      (*callback_)(heap_,
+                   MemoryChunk::FromAnyPointerAddress(addr),
+                   kStoreBufferFullEvent);
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_INL_H_
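
// [Editor's sketch, not part of the diff] The shape of the mutator-side
// write barrier that feeds StoreBuffer::Mark.  The predicate and mark hook
// are stand-ins; in V8 the real barrier is emitted inline by the compiler.
#include <cstdint>
typedef uintptr_t AddressSketch;

bool InNewSpaceSketch(AddressSketch a);          // stand-in: Heap::InNewSpace
void StoreBufferMarkSketch(AddressSketch slot);  // stand-in: StoreBuffer::Mark

// Only old-space slots that now hold new-space values must be remembered;
// everything else is covered by scavenging new space directly.
inline void RecordWriteSketch(AddressSketch slot, AddressSketch value) {
  if (InNewSpaceSketch(value) && !InNewSpaceSketch(slot)) {
    StoreBufferMarkSketch(slot);
  }
}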
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
new file mode 100644
index 0000000..863b69b
--- /dev/null
+++ b/src/store-buffer.cc
@@ -0,0 +1,696 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+StoreBuffer::StoreBuffer(Heap* heap)
+    : heap_(heap),
+      start_(NULL),
+      limit_(NULL),
+      old_start_(NULL),
+      old_limit_(NULL),
+      old_top_(NULL),
+      old_buffer_is_sorted_(false),
+      old_buffer_is_filtered_(false),
+      during_gc_(false),
+      store_buffer_rebuilding_enabled_(false),
+      callback_(NULL),
+      may_move_store_buffer_entries_(true),
+      virtual_memory_(NULL),
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {
+}
+
+
+void StoreBuffer::Setup() {
+  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
+  uintptr_t start_as_int =
+      reinterpret_cast<uintptr_t>(virtual_memory_->address());
+  start_ =
+      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+  limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+
+  old_top_ = old_start_ = new Address[kOldStoreBufferLength];
+  old_limit_ = old_start_ + kOldStoreBufferLength;
+
+  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+  Address* vm_limit = reinterpret_cast<Address*>(
+      reinterpret_cast<char*>(virtual_memory_->address()) +
+          virtual_memory_->size());
+  ASSERT(start_ <= vm_limit);
+  ASSERT(limit_ <= vm_limit);
+  USE(vm_limit);
+  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+         0);
+
+  virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                          kStoreBufferSize,
+                          false);  // Not executable.
+  heap_->public_set_store_buffer_top(start_);
+
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;
+
+  ClearFilteringHashSets();
+}
+
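// [Editor's sketch, not part of the diff] Why Setup() reserves three times
// the buffer size: rounding an arbitrary base up to a 2 * kSize boundary
// skips at most 2 * kSize - 1 bytes, so a kSize-byte buffer with the
// required alignment always fits inside a 3 * kSize reservation.
#include <cassert>
#include <cstdint>

inline void CheckReservationFits() {
  const uintptr_t kSize = uintptr_t(1) << 17;
  for (uintptr_t base = 0; base <= 8 * kSize; base += 4096) {
    uintptr_t start = (base + 2 * kSize - 1) & ~(2 * kSize - 1);  // RoundUp
    assert(start >= base);
    assert(start + kSize <= base + 3 * kSize);  // fits in the reservation
  }
}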
+
+void StoreBuffer::TearDown() {
+  delete virtual_memory_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
+  delete[] old_start_;
+  old_start_ = old_top_ = old_limit_ = NULL;
+  start_ = limit_ = NULL;
+  heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+  isolate->heap()->store_buffer()->Compact();
+}
+
+
+#if V8_TARGET_ARCH_X64
+static int CompareAddresses(const void* void_a, const void* void_b) {
+  intptr_t a =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+  intptr_t b =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+  // Unfortunately if int is smaller than intptr_t there is no branch-free
+  // way to return a number with the same sign as the difference between the
+  // pointers.
+  if (a == b) return 0;
+  if (a < b) return -1;
+  ASSERT(a > b);
+  return 1;
+}
+#else
+static int CompareAddresses(const void* void_a, const void* void_b) {
+  intptr_t a =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+  intptr_t b =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+  ASSERT(sizeof(1) == sizeof(a));
+  // Shift down to avoid wraparound.
+  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
+}
+#endif
+
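// [Editor's note, not part of the diff] Why the 32-bit comparator shifts
// before subtracting: a raw (a - b) truncated to int can wrap when the
// pointers are more than 2^31 apart, giving the wrong sign.  Word-aligned
// 32-bit addresses shifted down by kPointerSizeLog2 fit in 30 bits, so
// their difference is always a valid int.  Worked example, 4-byte pointers:
//   a = 0xFFFFFFF0, b = 0x00000010
//   (int)(a - b)        -> 0xFFFFFFE0 == -32, wrong sign (a > b)
//   (a >> 2) - (b >> 2) -> 0x3FFFFFFC - 0x4 == 1073741816, correct sign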
+
+void StoreBuffer::Uniq() {
+  // Remove adjacent duplicates and cells that do not point at new space.
+  Address previous = NULL;
+  Address* write = old_start_;
+  ASSERT(may_move_store_buffer_entries_);
+  for (Address* read = old_start_; read < old_top_; read++) {
+    Address current = *read;
+    if (current != previous) {
+      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+        *write++ = current;
+      }
+    }
+    previous = current;
+  }
+  old_top_ = write;
+}
+
+
+void StoreBuffer::HandleFullness() {
+  if (old_buffer_is_filtered_) return;
+  ASSERT(may_move_store_buffer_entries_);
+  Compact();
+
+  old_buffer_is_filtered_ = true;
+  bool page_has_scan_on_scavenge_flag = false;
+
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // If filtering out the entries from scan_on_scavenge pages got us down to
+  // less than half full, then we are satisfied with that.
+  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0 }
+  };
+  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != 0 || old_top_ == old_start_);
+    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+  }
+  UNREACHABLE();
+}
+
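// [Editor's note, not part of the diff] What the sample table above works
// out to numerically, assuming 1 MB pages and 8-byte pointers (both are
// assumptions; the real values come from Page::kPageSize and kPointerSize).
// Pointer-sized slots per page: (1 << 20) / 8 = 131072, so:
//   step 97: threshold (131072 / 97) / 8  = 168   (~1 in 8 slots)
//   step 23: threshold (131072 / 23) / 16 = 356
//   step  7: threshold (131072 / 7) / 32  = 585
//   step  3: threshold (131072 / 3) / 256 = 170
//   step  1: threshold 0 (last resort: exempt every page that appears)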
+
+// Sample the store buffer to see if some pages are taking up a lot of space
+// in the store buffer.
+void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    chunk->set_store_buffer_counter(0);
+  }
+  bool created_new_scan_on_scavenge_pages = false;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+    }
+    int old_counter = containing_chunk->store_buffer_counter();
+    if (old_counter == threshold) {
+      containing_chunk->set_scan_on_scavenge(true);
+      created_new_scan_on_scavenge_pages = true;
+    }
+    containing_chunk->set_store_buffer_counter(old_counter + 1);
+    previous_chunk = containing_chunk;
+  }
+  if (created_new_scan_on_scavenge_pages) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+  old_buffer_is_filtered_ = true;
+}
+
+
+void StoreBuffer::Filter(int flag) {
+  Address* new_top = old_start_;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p++) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+      previous_chunk = containing_chunk;
+    }
+    if (!containing_chunk->IsFlagSet(flag)) {
+      *new_top++ = addr;
+    }
+  }
+  old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+void StoreBuffer::SortUniq() {
+  Compact();
+  if (old_buffer_is_sorted_) return;
+  qsort(reinterpret_cast<void*>(old_start_),
+        old_top_ - old_start_,
+        sizeof(*old_top_),
+        &CompareAddresses);
+  Uniq();
+
+  old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+bool StoreBuffer::PrepareForIteration() {
+  Compact();
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  bool page_has_scan_on_scavenge_flag = false;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
+  return page_has_scan_on_scavenge_flag;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+  ClearFilteringHashSets();
+  Uniq();  // Also removes things that no longer point to new space.
+  CheckForFullBuffer();
+}
+
+
+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+  if (!FLAG_enable_slow_asserts) return true;
+  if (in_store_buffer_1_element_cache != NULL &&
+      *in_store_buffer_1_element_cache == cell_address) {
+    return true;
+  }
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  for (Address* current = top - 1; current >= start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
+}
+
+
+void StoreBuffer::GCPrologue() {
+  ClearFilteringHashSets();
+  during_gc_ = true;
+}
+
+
+#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
+  // Do nothing.
+}
+
+
+void StoreBuffer::VerifyPointers(PagedSpace* space,
+                                 RegionCallback region_callback) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    FindPointersToNewSpaceOnPage(
+        reinterpret_cast<PagedSpace*>(page->owner()),
+        page,
+        region_callback,
+        &DummyScavengePointer);
+  }
+}
+
+
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy predicate point into
+        // the active semispace.
+        heap_->InNewSpace(*slot);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef DEBUG
+  VerifyPointers(heap_->old_pointer_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInRegion);
+  VerifyPointers(heap_->map_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
+  VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue() {
+  during_gc_ = false;
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback) {
+  for (Address slot_address = start;
+       slot_address < end;
+       slot_address += kPointerSize) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap_->InNewSpace(*slot)) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
+      ASSERT(object->IsHeapObject());
+      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
+      if (heap_->InNewSpace(*slot)) {
+        EnterDirectlyIntoStoreBuffer(slot_address);
+      }
+    }
+  }
+}
+
+
+// Compute start address of the first map following given addr.
+static inline Address MapStartAlign(Address addr) {
+  Address page = Page::FromAddress(addr)->area_start();
+  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute end address of the first map preceding given addr.
+static inline Address MapEndAlign(Address addr) {
+  Address page = Page::FromAllocationTop(addr)->area_start();
+  return page + ((addr - page) / Map::kSize * Map::kSize);
+}
+
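// [Editor's note, not part of the diff] The two helpers above clamp an
// arbitrary address to whole-map boundaries within a page.  With offsets
// measured from area_start() and a hypothetical Map::kSize of 88:
//   MapStartAlign(page + 100) -> page + 176   (round up:   (100+87)/88*88)
//   MapEndAlign(page + 100)   -> page + 88    (round down:  100/88*88)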
+
+void StoreBuffer::FindPointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  ASSERT(MapStartAlign(start) == start);
+  ASSERT(MapEndAlign(end) == end);
+
+  Address map_address = start;
+  while (map_address < end) {
+    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
+    ASSERT(Memory::Object_at(map_address)->IsMap());
+
+    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+    FindPointersToNewSpaceInRegion(pointer_fields_start,
+                                   pointer_fields_end,
+                                   slot_callback);
+    map_address += Map::kSize;
+  }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  Address map_aligned_start = MapStartAlign(start);
+  Address map_aligned_end   = MapEndAlign(end);
+
+  ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
+
+  FindPointersToNewSpaceInMaps(map_aligned_start,
+                               map_aligned_end,
+                               slot_callback);
+}
+
+
+// This function iterates over all the pointers in a paged space in the heap,
+// looking for pointers into new space.  Within the pages there may be dead
+// objects that have not been overwritten by free spaces or fillers because of
+// lazy sweeping.  These dead objects may not contain pointers to new space.
+// The garbage areas that have been swept properly (these will normally be the
+// large ones) will be marked with free space and filler map words.  In
+// addition any area that has never been used at all for object allocation must
+// be marked with a free space or filler.  Because the free space and filler
+// maps do not move we can always recognize these even after a compaction.
+// Normal objects like FixedArrays and JSObjects should not contain references
+// to these maps.  The special garbage section (see comment in spaces.h) is
+// skipped since it can contain absolutely anything.  Any objects that are
+// allocated during iteration may or may not be visited by the iteration, but
+// they will not be partially visited.
+void StoreBuffer::FindPointersToNewSpaceOnPage(
+    PagedSpace* space,
+    Page* page,
+    RegionCallback region_callback,
+    ObjectSlotCallback slot_callback) {
+  Address visitable_start = page->area_start();
+  Address end_of_page = page->area_end();
+
+  Address visitable_end = visitable_start;
+
+  Object* free_space_map = heap_->free_space_map();
+  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+
+  while (visitable_end < end_of_page) {
+    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // Skip fillers but not things that look like fillers in the special
+    // garbage section which can contain anything.
+    if (o == free_space_map ||
+        o == two_pointer_filler_map ||
+        (visitable_end == space->top() && visitable_end != space->limit())) {
+      if (visitable_start != visitable_end) {
+        // After calling this the special garbage section may have moved.
+        (this->*region_callback)(visitable_start,
+                                 visitable_end,
+                                 slot_callback);
+        if (visitable_end >= space->top() && visitable_end < space->limit()) {
+          visitable_end = space->limit();
+          visitable_start = visitable_end;
+          continue;
+        }
+      }
+      if (visitable_end == space->top() && visitable_end != space->limit()) {
+        visitable_start = visitable_end = space->limit();
+      } else {
+        // At this point we are either at the start of a filler or we are at
+        // the point where the space->top() used to be before the
+        // region_callback call above.  Either way we can skip the
+        // object at the current spot:  We don't promise to visit objects
+        // allocated during heap traversal, and if space->top() moved then it
+        // must be because an object was allocated at this point.
+        visitable_start =
+            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+        visitable_end = visitable_start;
+      }
+    } else {
+      ASSERT(o != free_space_map);
+      ASSERT(o != two_pointer_filler_map);
+      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+      visitable_end += kPointerSize;
+    }
+  }
+  ASSERT(visitable_end == end_of_page);
+  if (visitable_start != visitable_end) {
+    (this->*region_callback)(visitable_start,
+                             visitable_end,
+                             slot_callback);
+  }
+}
+
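// [Editor's sketch, not part of the diff] The core of the page walk above,
// reduced to its skeleton: a free-space or filler map word marks a dead
// region that can be skipped whole; anything else is a live pointer slot.
// (The special top()/limit() garbage-section handling is omitted here.)
#include <cstdint>
typedef uintptr_t Addr;

bool IsFillerMapWord(Addr word);          // stand-in: free-space/filler check
int ObjectSizeAt(Addr addr);              // stand-in for HeapObject::Size()
void VisitRegion(Addr start, Addr end);   // stand-in for the region callback

inline void ScanPageSketch(Addr area_start, Addr area_end) {
  Addr region_start = area_start;
  Addr cursor = area_start;
  while (cursor < area_end) {
    if (IsFillerMapWord(*reinterpret_cast<Addr*>(cursor))) {
      if (region_start != cursor) VisitRegion(region_start, cursor);
      cursor += ObjectSizeAt(cursor);  // hop over the dead/filler object
      region_start = cursor;
    } else {
      cursor += sizeof(Addr);          // ordinary slot, keep scanning
    }
  }
  if (region_start != cursor) VisitRegion(region_start, cursor);
}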
+
+void StoreBuffer::IteratePointersInStoreBuffer(
+    ObjectSlotCallback slot_callback) {
+  Address* limit = old_top_;
+  old_top_ = old_start_;
+  {
+    DontMoveStoreBufferEntriesScope scope(this);
+    for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+      Address* saved_top = old_top_;
+#endif
+      Object** slot = reinterpret_cast<Object**>(*current);
+      Object* object = *slot;
+      if (heap_->InFromSpace(object)) {
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+        if (heap_->InNewSpace(*slot)) {
+          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+        }
+      }
+      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+    }
+  }
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+  // We do not sort or remove duplicated entries from the store buffer because
+  // we expect that the callback will rebuild the store buffer, thus removing
+  // all duplicates and pointers to old space.
+  bool some_pages_to_scan = PrepareForIteration();
+
+  // TODO(gc): we want to skip slots on evacuation candidates
+  // but we can't simply figure that out from slot address
+  // because slot can belong to a large object.
+  IteratePointersInStoreBuffer(slot_callback);
+
+  // We are done scanning all the pointers that were in the store buffer, but
+  // there may be some pages marked scan_on_scavenge that have pointers to new
+  // space that are not in the store buffer.  We must scan them now.  As we
+  // scan, the surviving pointers to new space will be added to the store
+  // buffer.  If there are still a lot of pointers to new space then we will
+  // keep the scan_on_scavenge flag on the page and discard the pointers that
+  // were added to the store buffer.  If there are not many pointers to new
+  // space left on the page we will keep the pointers in the store buffer and
+  // remove the flag from the page.
+  if (some_pages_to_scan) {
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
+    }
+    PointerChunkIterator it(heap_);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != NULL) {
+      if (chunk->scan_on_scavenge()) {
+        chunk->set_scan_on_scavenge(false);
+        if (callback_ != NULL) {
+          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
+        }
+        if (chunk->owner() == heap_->lo_space()) {
+          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
+          HeapObject* array = large_page->GetObject();
+          ASSERT(array->IsFixedArray());
+          Address start = array->address();
+          Address end = start + array->Size();
+          FindPointersToNewSpaceInRegion(start, end, slot_callback);
+        } else {
+          Page* page = reinterpret_cast<Page*>(chunk);
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+          FindPointersToNewSpaceOnPage(
+              owner,
+              page,
+              (owner == heap_->map_space() ?
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
+                 &StoreBuffer::FindPointersToNewSpaceInRegion),
+              slot_callback);
+        }
+      }
+    }
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::Compact() {
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+
+  if (top == start_) return;
+
+  // There's no check of the limit in the loop below so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
+  ASSERT(top <= limit_);
+  heap_->public_set_store_buffer_top(start_);
+  if (top - start_ > old_limit_ - old_top_) {
+    HandleFullness();
+  }
+  ASSERT(may_move_store_buffer_entries_);
+  // Goes through the addresses in the store buffer attempting to remove
+  // duplicates.  In the interest of speed this is a lossy operation.  Some
+  // duplicates will remain.  We have two hash sets with different hash
+  // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
+  for (Address* current = start_; current < top; current++) {
+    ASSERT(!heap_->cell_space()->Contains(*current));
+    ASSERT(!heap_->code_space()->Contains(*current));
+    ASSERT(!heap_->old_data_space()->Contains(*current));
+    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+    // Shift out the last bits including any tags.
+    int_addr >>= kPointerSizeLog2;
+    int hash1 =
+        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
+    int hash2 =
+        ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
+    } else {
+      // Rather than slowing down we just throw away some entries.  This will
+      // cause some duplicates to remain undetected.
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
+    }
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+    ASSERT(old_top_ <= old_limit_);
+  }
+  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
+  CheckForFullBuffer();
+}
+
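// [Editor's sketch, not part of the diff] The lossy duplicate filter from
// Compact() in standalone form.  A false "not seen" answer only leaves a
// duplicate in the old buffer, so correctness never depends on the hash
// sets.  (The original's xor-fold of the already-masked hash2 is a no-op
// and is dropped here for clarity.)
#include <cstdint>

static const int kLog2Sketch = 12;              // kHashSetLengthLog2
static const int kLenSketch = 1 << kLog2Sketch;

inline bool ProbablySeen(uintptr_t addr, uintptr_t* set1, uintptr_t* set2) {
  uintptr_t key = addr >> 3;  // drop tag/alignment bits (kPointerSizeLog2)
  int h1 = static_cast<int>((key ^ (key >> kLog2Sketch)) & (kLenSketch - 1));
  if (set1[h1] == key) return true;
  int h2 = static_cast<int>((key - (key >> kLog2Sketch)) & (kLenSketch - 1));
  if (set2[h2] == key) return true;
  if (set1[h1] == 0) {
    set1[h1] = key;
  } else if (set2[h2] == 0) {
    set2[h2] = key;
  } else {
    set1[h1] = key;  // evict: some duplicates will go undetected
    set2[h2] = 0;
  }
  return false;
}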
+
+void StoreBuffer::CheckForFullBuffer() {
+  if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
+    HandleFullness();
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/src/store-buffer.h b/src/store-buffer.h
new file mode 100644
index 0000000..c3f77c3
--- /dev/null
+++ b/src/store-buffer.h
@@ -0,0 +1,253 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_H_
+#define V8_STORE_BUFFER_H_
+
+#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
+#include "platform.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+class StoreBuffer;
+
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
+typedef void (StoreBuffer::*RegionCallback)(
+    Address start, Address end, ObjectSlotCallback slot_callback);
+
+// Used to implement the write barrier by collecting addresses of pointers
+// between spaces.
+class StoreBuffer {
+ public:
+  explicit StoreBuffer(Heap* heap);
+
+  static void StoreBufferOverflow(Isolate* isolate);
+
+  inline Address TopAddress();
+
+  void Setup();
+  void TearDown();
+
+  // This is used by the mutator to enter addresses into the store buffer.
+  inline void Mark(Address addr);
+
+  // This is used by the heap traversal to enter the addresses into the store
+  // buffer that should still be in the store buffer after GC.  It enters
+  // addresses directly into the old buffer because the GC starts by wiping the
+  // old buffer and thereafter only visits each cell once so there is no need
+  // to attempt to remove any dupes.  During the first part of a GC we
+  // are using the store buffer to access the old spaces and at the same time
+  // we are rebuilding the store buffer using this function.  There is, however,
+  // no issue of overwriting the buffer we are iterating over, because this
+  // stage of the scavenge can only reduce the number of addresses in the store
+  // buffer (some objects are promoted so pointers to them do not need to be in
+  // the store buffer).  The later parts of the GC scan the pages that are
+  // exempt from the store buffer and process the promotion queue.  These steps
+  // can overflow this buffer.  We check for this and on overflow we call the
+  // callback set up with the StoreBufferRebuildScope object.
+  inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+  // Iterates over all pointers that go from old space to new space.  It will
+  // empty the store buffer as it starts, so the callback should re-enter
+  // surviving old-to-new pointers into the store buffer to rebuild it.
+  void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
+  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
+  static const int kStoreBufferSize = kStoreBufferOverflowBit;
+  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+  static const int kHashSetLengthLog2 = 12;
+  static const int kHashSetLength = 1 << kHashSetLengthLog2;
+
+  void Compact();
+
+  void GCPrologue();
+  void GCEpilogue();
+
+  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
+  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+  void SetTop(Object*** top) {
+    ASSERT(top >= Start());
+    ASSERT(top <= Limit());
+    old_top_ = reinterpret_cast<Address*>(top);
+  }
+
+  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
+
+  // Goes through the store buffer removing pointers to things that have
+  // been promoted.  Rebuilds the store buffer completely if it overflowed.
+  void SortUniq();
+
+  void HandleFullness();
+  void Verify();
+
+  bool PrepareForIteration();
+
+#ifdef DEBUG
+  void Clean();
+  // Slow, for asserts only.
+  bool CellIsInStoreBuffer(Address cell);
+#endif
+
+  void Filter(int flag);
+
+ private:
+  Heap* heap_;
+
+  // The store buffer is divided up into a new buffer that is constantly being
+  // filled by mutator activity and an old buffer that is filled with the data
+  // from the new buffer after compression.
+  Address* start_;
+  Address* limit_;
+
+  Address* old_start_;
+  Address* old_limit_;
+  Address* old_top_;
+
+  bool old_buffer_is_sorted_;
+  bool old_buffer_is_filtered_;
+  bool during_gc_;
+  // The garbage collector iterates over many pointers to new space that are not
+  // handled by the store buffer.  This flag indicates whether the pointers
+  // found by the callbacks should be added to the store buffer or not.
+  bool store_buffer_rebuilding_enabled_;
+  StoreBufferCallback callback_;
+  bool may_move_store_buffer_entries_;
+
+  VirtualMemory* virtual_memory_;
+
+  // Two hash sets used for filtering.
+  // If address is in the hash set then it is guaranteed to be in the
+  // old part of the store buffer.
+  uintptr_t* hash_set_1_;
+  uintptr_t* hash_set_2_;
+  bool hash_sets_are_empty_;
+
+  void ClearFilteringHashSets();
+
+  void CheckForFullBuffer();
+  void Uniq();
+  void ExemptPopularPages(int prime_sample_step, int threshold);
+
+  void FindPointersToNewSpaceInRegion(Address start,
+                                      Address end,
+                                      ObjectSlotCallback slot_callback);
+
+  // For each region of pointers on a page in use from an old space call
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or callback can cause an allocation
+  // in old space and changes in allocation watermark then
+  // can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(
+      PagedSpace* space,
+      Page* page,
+      RegionCallback region_callback,
+      ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceInMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceOnPage(
+    PagedSpace* space,
+    Page* page,
+    RegionCallback region_callback,
+    ObjectSlotCallback slot_callback);
+
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+
+#ifdef DEBUG
+  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
+  void VerifyPointers(LargeObjectSpace* space);
+#endif
+
+  friend class StoreBufferRebuildScope;
+  friend class DontMoveStoreBufferEntriesScope;
+};
+
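// [Editor's note, not part of the diff] The constants above on a 64-bit
// build (kPointerSizeLog2 == 3):
//   kStoreBufferOverflowBit = 1 << 17, so kStoreBufferSize = 128 KB
//   kStoreBufferLength      = 128 KB / 8 = 16384 Address entries
//   kOldStoreBufferLength   = 16384 * 16 = 262144 entries (2 MB)
//   kHashSetLength          = 1 << 12 = 4096 slots per filtering hash set
// On a 32-bit build the shift is 14 + 2, giving a 64 KB new buffer.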
+
+class StoreBufferRebuildScope {
+ public:
+  explicit StoreBufferRebuildScope(Heap* heap,
+                                   StoreBuffer* store_buffer,
+                                   StoreBufferCallback callback)
+      : heap_(heap),
+        store_buffer_(store_buffer),
+        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
+        stored_callback_(store_buffer->callback_) {
+    store_buffer_->store_buffer_rebuilding_enabled_ = true;
+    store_buffer_->callback_ = callback;
+    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+  }
+
+  ~StoreBufferRebuildScope() {
+    store_buffer_->callback_ = stored_callback_;
+    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
+    store_buffer_->CheckForFullBuffer();
+  }
+
+ private:
+  Heap* heap_;
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+  StoreBufferCallback stored_callback_;
+};
+
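// [Editor's sketch, not part of the diff] Typical use of the scope during a
// scavenge; the callback name is a stand-in for whatever the heap installs.
//
//   {
//     StoreBufferRebuildScope scope(heap, heap->store_buffer(),
//                                   &SomeScavengeCallbackSketch);
//     // ... scavenge: EnterDirectlyIntoStoreBuffer is active here ...
//   }  // destructor restores the previous callback and rechecks fullness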
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->may_move_store_buffer_entries_) {
+    store_buffer_->may_move_store_buffer_entries_ = false;
+  }
+
+  ~DontMoveStoreBufferEntriesScope() {
+    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_H_
diff --git a/src/string-search.h b/src/string-search.h
index 1223db0..f540583 100644
--- a/src/string-search.h
+++ b/src/string-search.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -242,9 +242,9 @@
 
 
 template <typename PatternChar, typename SubjectChar>
-static inline bool CharCompare(const PatternChar* pattern,
-                               const SubjectChar* subject,
-                               int length) {
+inline bool CharCompare(const PatternChar* pattern,
+                        const SubjectChar* subject,
+                        int length) {
   ASSERT(length > 0);
   int pos = 0;
   do {
@@ -555,10 +555,10 @@
 // object should be constructed once and the Search function then called
 // for each search.
 template <typename SubjectChar, typename PatternChar>
-static int SearchString(Isolate* isolate,
-                        Vector<const SubjectChar> subject,
-                        Vector<const PatternChar> pattern,
-                        int start_index) {
+int SearchString(Isolate* isolate,
+                 Vector<const SubjectChar> subject,
+                 Vector<const PatternChar> pattern,
+                 int start_index) {
   StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
   return search.Search(subject, start_index);
 }
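
As the comment above notes, a StringSearch object should be constructed once
and reused; SearchString is the one-shot convenience wrapper. A sketch of the
reuse pattern, not part of the patch; the helper name and the ASCII-only char
type are illustrative:

  // Illustrative helper, not in the tree: counts (possibly overlapping)
  // occurrences of pattern in subject, reusing one StringSearch instance
  // across calls to Search so the pattern preprocessing happens only once.
  int CountOccurrences(Isolate* isolate,
                       Vector<const char> subject,
                       Vector<const char> pattern) {
    StringSearch<char, char> search(isolate, pattern);
    int count = 0;
    for (int index = search.Search(subject, 0); index >= 0;
         index = search.Search(subject, index + 1)) {
      count++;
    }
    return count;
  }
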
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 8086cf9..35f7be5 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -350,29 +350,24 @@
   }
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    switch (descs->GetType(i)) {
-      case FIELD: {
-        Object* key = descs->GetKey(i);
-        if (key->IsString() || key->IsNumber()) {
-          int len = 3;
-          if (key->IsString()) {
-            len = String::cast(key)->length();
-          }
-          for (; len < 18; len++)
-            Put(' ');
-          if (key->IsString()) {
-            Put(String::cast(key));
-          } else {
-            key->ShortPrint();
-          }
-          Add(": ");
-          Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
-          Add("%o\n", value);
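+    // Only FIELD properties are printed; other descriptor types are skipped.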
+    if (descs->GetType(i) == FIELD) {
+      Object* key = descs->GetKey(i);
+      if (key->IsString() || key->IsNumber()) {
+        int len = 3;
+        if (key->IsString()) {
+          len = String::cast(key)->length();
         }
+        for (; len < 18; len++)
+          Put(' ');
+        if (key->IsString()) {
+          Put(String::cast(key));
+        } else {
+          key->ShortPrint();
+        }
+        Add(": ");
+        Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+        Add("%o\n", value);
       }
-      break;
-      default:
-      break;
     }
   }
 }
diff --git a/src/string.js b/src/string.js
index 297105d..3608bac 100644
--- a/src/string.js
+++ b/src/string.js
@@ -46,16 +46,18 @@
 
 // ECMA-262 section 15.5.4.2
 function StringToString() {
-  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
     throw new $TypeError('String.prototype.toString is not generic');
+  }
   return %_ValueOf(this);
 }
 
 
 // ECMA-262 section 15.5.4.3
 function StringValueOf() {
-  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
     throw new $TypeError('String.prototype.valueOf is not generic');
+  }
   return %_ValueOf(this);
 }
 
@@ -91,7 +93,8 @@
 // ECMA-262, section 15.5.4.6
 function StringConcat() {
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.concat"]);
   }
   var len = %_ArgumentsLength();
   var this_as_string = TO_STRING_INLINE(this);
@@ -358,7 +361,7 @@
       builder_elements.push(SubString(string, position, next));
     }
   }
-};
+}
 
 
 // Compute the string of a given regular expression capture.
@@ -371,7 +374,7 @@
   if (start < 0) return;
   var end = lastCaptureInfo[CAPTURE(scaled + 1)];
   return SubString(string, start, end);
-};
+}
 
 
 // Add the string of a given regular expression capture to the
@@ -384,7 +387,7 @@
   if (start < 0) return;
   var end = matchInfo[CAPTURE(scaled + 1)];
   builder.addSpecialSlice(start, end);
-};
+}
 
 // TODO(lrn): This array will survive indefinitely if replace is never
 // called again. However, it will be empty, since the contents are cleared
@@ -531,30 +534,36 @@
   var s_len = s.length;
   var start_i = TO_INTEGER(start);
   var end_i = s_len;
-  if (end !== void 0)
+  if (end !== void 0) {
     end_i = TO_INTEGER(end);
+  }
 
   if (start_i < 0) {
     start_i += s_len;
-    if (start_i < 0)
+    if (start_i < 0) {
       start_i = 0;
+    }
   } else {
-    if (start_i > s_len)
+    if (start_i > s_len) {
       start_i = s_len;
+    }
   }
 
   if (end_i < 0) {
     end_i += s_len;
-    if (end_i < 0)
+    if (end_i < 0) {
       end_i = 0;
+    }
   } else {
-    if (end_i > s_len)
+    if (end_i > s_len) {
       end_i = s_len;
+    }
   }
 
   var num_c = end_i - start_i;
-  if (num_c < 0)
+  if (num_c < 0) {
     num_c = 0;
+  }
 
   return SubString(s, start_i, start_i + num_c);
 }
@@ -568,7 +577,6 @@
   }
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
-  if (limit === 0) return [];
 
   // ECMA-262 says that if separator is undefined, the result should
   // be an array of size 1 containing the entire string.  SpiderMonkey
@@ -582,6 +590,9 @@
   var length = subject.length;
   if (!IS_REGEXP(separator)) {
     separator = TO_STRING_INLINE(separator);
+
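+    // ES5 15.5.4.14 coerces the separator with ToString (observable via
+    // side effects) before a zero limit may short-circuit the result.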
+    if (limit === 0) return [];
+
     var separator_length = separator.length;
 
     // If the separator string is empty then return the elements in the subject.
@@ -592,6 +603,8 @@
     return result;
   }
 
+  if (limit === 0) return [];
+
   %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
 
   if (length === 0) {
@@ -688,7 +701,7 @@
     }
   }
 
-  return (start_i + 1 == end_i
+  return ((start_i + 1 == end_i)
           ? %_StringCharAt(s, start_i)
           : %_SubString(s, start_i, end_i));
 }
@@ -732,7 +745,7 @@
   var end = start + len;
   if (end > s.length) end = s.length;
 
-  return (start + 1 == end
+  return ((start + 1 == end)
           ? %_StringCharAt(s, start)
           : %_SubString(s, start, end));
 }
@@ -832,7 +845,7 @@
                               .replace(/>/g, "&gt;")
                               .replace(/"/g, "&quot;")
                               .replace(/'/g, "&#039;");
-};
+}
 
 
 // Compatibility support for KJS.
@@ -953,7 +966,7 @@
 
 
   // Set up the non-enumerable functions on the String prototype object.
-  InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
+  InstallFunctions($String.prototype, DONT_ENUM, $Array(
     "valueOf", StringValueOf,
     "toString", StringToString,
     "charAt", StringCharAt,
diff --git a/src/strtod.cc b/src/strtod.cc
index c89c8f3..be79c80 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -27,7 +27,6 @@
 
 #include <stdarg.h>
 #include <math.h>
-#include <limits>
 
 #include "globals.h"
 #include "utils.h"
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index cdb4874..ea7d74f 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -55,7 +55,15 @@
   ASSERT(IsPowerOf2(kSecondaryTableSize));
   if (create_heap_objects) {
     HandleScope scope;
-    Clear();
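+    // Seed both tables with the illegal builtin so that every probe finds
+    // a well-formed (key, value) pair.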
+    Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+    for (int i = 0; i < kPrimaryTableSize; i++) {
+      primary_[i].key = heap()->empty_string();
+      primary_[i].value = empty;
+    }
+    for (int j = 0; j < kSecondaryTableSize; j++) {
+      secondary_[j].key = heap()->empty_string();
+      secondary_[j].value = empty;
+    }
   }
 }
 
@@ -101,8 +109,8 @@
 }
 
 
-MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
-                                               JSObject* receiver) {
+Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
+                                               Handle<JSObject> receiver) {
   ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
   // If no global objects are present in the prototype chain, the load
   // nonexistent IC stub can be shared for all names for a given map
@@ -110,558 +118,431 @@
   // there are global objects involved, we need to check global
   // property cells in the stub and therefore the stub will be
   // specific to the name.
-  String* cache_name = heap()->empty_string();
+  Handle<String> cache_name = factory()->empty_string();
   if (receiver->IsGlobalObject()) cache_name = name;
-  JSObject* last = receiver;
+  Handle<JSObject> last = receiver;
   while (last->GetPrototype() != heap()->null_value()) {
-    last = JSObject::cast(last->GetPrototype());
+    last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
     if (last->IsGlobalObject()) cache_name = name;
   }
   // Compile the stub that is either shared for all names or
   // name specific if there are global objects involved.
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
-  Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadNonexistent(cache_name, receiver, last);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
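+  // Cache miss: compile the stub, log its creation, and memoize it in the
+  // receiver map's code cache so the probe above hits next time.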
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadNonexistent(cache_name, receiver, last);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
+  JSObject::UpdateMapCodeCache(receiver, cache_name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadField(String* name,
-                                         JSObject* receiver,
-                                         JSObject* holder,
+Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
+                                         Handle<JSObject> receiver,
+                                         Handle<JSObject> holder,
                                          int field_index) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadField(receiver, holder, field_index, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadField(receiver, holder, field_index, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadCallback(String* name,
-                                            JSObject* receiver,
-                                            JSObject* holder,
-                                            AccessorInfo* callback) {
+Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
+                                            Handle<JSObject> receiver,
+                                            Handle<JSObject> holder,
+                                            Handle<AccessorInfo> callback) {
   ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadCallback(name, receiver, holder, callback);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadCallback(name, receiver, holder, callback);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadConstant(String* name,
-                                            JSObject* receiver,
-                                            JSObject* holder,
-                                            Object* value) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
+                                            Handle<JSObject> receiver,
+                                            Handle<JSObject> holder,
+                                            Handle<Object> value) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadConstant(receiver, holder, value, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadConstant(receiver, holder, value, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
-                                               JSObject* receiver,
-                                               JSObject* holder) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
+                                               Handle<JSObject> receiver,
+                                               Handle<JSObject> holder) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadInterceptor(receiver, holder, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeLoadNormal() {
-  return isolate_->builtins()->builtin(Builtins::kLoadIC_Normal);
+Handle<Code> StubCache::ComputeLoadNormal() {
+  return isolate_->builtins()->LoadIC_Normal();
 }
 
 
-MaybeObject* StubCache::ComputeLoadGlobal(String* name,
-                                          JSObject* receiver,
-                                          GlobalObject* holder,
-                                          JSGlobalPropertyCell* cell,
+Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
+                                          Handle<JSObject> receiver,
+                                          Handle<GlobalObject> holder,
+                                          Handle<JSGlobalPropertyCell> cell,
                                           bool is_dont_delete) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    LoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadGlobal(receiver,
-                                                           holder,
-                                                           cell,
-                                                           name,
-                                                           is_dont_delete);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  LoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
+  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
-                                              JSObject* receiver,
-                                              JSObject* holder,
+Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
+                                              Handle<JSObject> receiver,
+                                              Handle<JSObject> holder,
                                               int field_index) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadField(name, receiver, holder, field_index);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadField(name, receiver, holder, field_index);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
-                                                 JSObject* receiver,
-                                                 JSObject* holder,
-                                                 Object* value) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
+                                                 Handle<JSObject> receiver,
+                                                 Handle<JSObject> holder,
+                                                 Handle<Object> value) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadConstant(name, receiver, holder, value);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadConstant(name, receiver, holder, value);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
-                                                    JSObject* receiver,
-                                                    JSObject* holder) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
+                                                    Handle<JSObject> receiver,
+                                                    Handle<JSObject> holder) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadInterceptor(receiver, holder, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
-                                                 JSObject* receiver,
-                                                 JSObject* holder,
-                                                 AccessorInfo* callback) {
-  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+Handle<Code> StubCache::ComputeKeyedLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
+  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          compiler.CompileLoadCallback(name, receiver, holder, callback);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code =
+      compiler.CompileLoadCallback(name, receiver, holder, callback);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-
-MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
-                                                    JSArray* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
+                                                    Handle<JSArray> receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  ASSERT(receiver->IsJSObject());
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadArrayLength(name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
-                                                     String* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
+                                                     Handle<String> receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Map* map = receiver->map();
-  Object* code = map->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Map> map(receiver->map());
+  Handle<Object> probe(map->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadStringLength(name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  Map::UpdateCodeCache(map, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
-    String* name,
-    JSFunction* receiver) {
+Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
+    Handle<String> name,
+    Handle<JSFunction> receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedLoadStubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileLoadFunctionPrototype(name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreField(String* name,
-                                          JSObject* receiver,
+Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
+                                          Handle<JSObject> receiver,
                                           int field_index,
-                                          Map* transition,
+                                          Handle<Map> transition,
                                           StrictModeFlag strict_mode) {
-  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+  PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, type, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreField(receiver, field_index, transition, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code =
+      compiler.CompileStoreField(receiver, field_index, transition, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
-    JSObject* receiver,
-    bool is_store,
+Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
+    Handle<JSObject> receiver,
+    KeyedIC::StubKind stub_kind,
     StrictModeFlag strict_mode) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(
-          is_store ? Code::KEYED_STORE_IC :
-                     Code::KEYED_LOAD_IC,
+          stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
+                                     : Code::KEYED_STORE_IC,
           NORMAL,
           strict_mode);
-  String* name = is_store
-      ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
-      : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
-  Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
-  if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
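+  // The monomorphic element stub is cached under a kind-specific symbol.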
+  Handle<String> name;
+  switch (stub_kind) {
+    case KeyedIC::LOAD:
+      name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol();
+      break;
+    case KeyedIC::STORE_NO_TRANSITION:
+      name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  Handle<Map> receiver_map(receiver->map());
+  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
 
-  MaybeObject* maybe_new_code = NULL;
-  Map* receiver_map = receiver->map();
-  if (is_store) {
-    KeyedStoreStubCompiler compiler(strict_mode);
-    maybe_new_code = compiler.CompileStoreElement(receiver_map);
+  Handle<Code> code;
+  switch (stub_kind) {
+    case KeyedIC::LOAD: {
+      KeyedLoadStubCompiler compiler(isolate_);
+      code = compiler.CompileLoadElement(receiver_map);
+      break;
+    }
+    case KeyedIC::STORE_NO_TRANSITION: {
+      KeyedStoreStubCompiler compiler(isolate_, strict_mode);
+      code = compiler.CompileStoreElement(receiver_map);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  ASSERT(!code.is_null());
+
+  if (stub_kind == KeyedIC::LOAD) {
+    PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0));
   } else {
-    KeyedLoadStubCompiler compiler;
-    maybe_new_code = compiler.CompileLoadElement(receiver_map);
+    PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
   }
-  Code* code;
-  if (!maybe_new_code->To(&code)) return maybe_new_code;
-  if (is_store) {
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
-                            Code::cast(code), 0));
-  } else {
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
-                            Code::cast(code), 0));
-  }
-  ASSERT(code->IsCode());
-  Object* result;
-  { MaybeObject* maybe_result =
-        receiver->UpdateMapCodeCache(name, Code::cast(code));
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
-  return isolate_->builtins()->builtin((strict_mode == kStrictMode)
-                            ? Builtins::kStoreIC_Normal_Strict
-                            : Builtins::kStoreIC_Normal);
+Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
+  return (strict_mode == kStrictMode)
+      ? isolate_->builtins()->StoreIC_Normal_Strict()
+      : isolate_->builtins()->StoreIC_Normal();
 }
 
 
-MaybeObject* StubCache::ComputeStoreGlobal(String* name,
-                                           GlobalObject* receiver,
-                                           JSGlobalPropertyCell* cell,
+Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
+                                           Handle<GlobalObject> receiver,
+                                           Handle<JSGlobalPropertyCell> cell,
                                            StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, NORMAL, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreGlobal(receiver, cell, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreCallback(
-    String* name,
-    JSObject* receiver,
-    AccessorInfo* callback,
-    StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
+                                             Handle<JSObject> receiver,
+                                             Handle<AccessorInfo> callback,
+                                             StrictModeFlag strict_mode) {
   ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, CALLBACKS, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreCallback(receiver, callback, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code = compiler.CompileStoreCallback(receiver, callback, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeStoreInterceptor(
-    String* name,
-    JSObject* receiver,
-    StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
+                                                Handle<JSObject> receiver,
+                                                StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, INTERCEPTOR, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreInterceptor(receiver, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  StoreStubCompiler compiler(isolate_, strict_mode);
+  Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
-                                               JSObject* receiver,
+Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
+                                               Handle<JSObject> receiver,
                                                int field_index,
-                                               Map* transition,
+                                               Handle<Map> transition,
                                                StrictModeFlag strict_mode) {
-  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
+  PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::KEYED_STORE_IC, type, strict_mode);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code =
-          compiler.CompileStoreField(receiver, field_index, transition, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+  Handle<Code> code =
+      compiler.CompileStoreField(receiver, field_index, transition, name);
+  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(receiver, name, code);
   return code;
 }
 
+
 #define CALL_LOGGER_TAG(kind, type) \
     (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
 
-MaybeObject* StubCache::ComputeCallConstant(int argc,
+Handle<Code> StubCache::ComputeCallConstant(int argc,
                                             Code::Kind kind,
-                                            Code::ExtraICState extra_ic_state,
-                                            String* name,
-                                            Object* object,
-                                            JSObject* holder,
-                                            JSFunction* function) {
+                                            Code::ExtraICState extra_state,
+                                            Handle<String> name,
+                                            Handle<Object> object,
+                                            Handle<JSObject> holder,
+                                            Handle<JSFunction> function) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(object, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+      IC::GetCodeCacheForObject(*object, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
 
   // Compute check type based on receiver/holder.
   CheckType check = RECEIVER_MAP_CHECK;
@@ -673,51 +554,36 @@
     check = BOOLEAN_CHECK;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    CONSTANT_FUNCTION,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    // If the function hasn't been compiled yet, we cannot do it now
-    // because it may cause GC. To avoid this issue, we return an
-    // internal error which will make sure we do not update any
-    // caches.
-    if (!function->is_compiled()) return Failure::InternalError();
-    // Compile the stub - only create stubs for fully compiled functions.
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallConstant(object, holder, function, name, check);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    Code::cast(code)->set_check_type(check);
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate_,
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, CONSTANT_FUNCTION, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallConstant(object, holder, function, name, check);
+  code->set_check_type(check);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate_,
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallField(int argc,
+Handle<Code> StubCache::ComputeCallField(int argc,
                                          Code::Kind kind,
-                                         Code::ExtraICState extra_ic_state,
-                                         String* name,
-                                         Object* object,
-                                         JSObject* holder,
+                                         Code::ExtraICState extra_state,
+                                         Handle<String> name,
+                                         Handle<Object> object,
+                                         Handle<JSObject> holder,
                                          int index) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(object, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+      IC::GetCodeCacheForObject(*object, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -726,47 +592,35 @@
     object = holder;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    FIELD,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallField(JSObject::cast(object),
-                                    holder,
-                                    index,
-                                    name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate_,
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, FIELD, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallField(Handle<JSObject>::cast(object),
+                                holder, index, name);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate_,
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallInterceptor(
-    int argc,
-    Code::Kind kind,
-    Code::ExtraICState extra_ic_state,
-    String* name,
-    Object* object,
-    JSObject* holder) {
+Handle<Code> StubCache::ComputeCallInterceptor(int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state,
+                                               Handle<String> name,
+                                               Handle<Object> object,
+                                               Handle<JSObject> holder) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(object, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
+      IC::GetCodeCacheForObject(*object, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -775,135 +629,61 @@
     object = holder;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    INTERCEPTOR,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate(),
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, INTERCEPTOR, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
+                                      holder, name);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate(),
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallNormal(int argc,
+Handle<Code> StubCache::ComputeCallGlobal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state,
-                                          String* name,
-                                          JSObject* receiver) {
-  Object* code;
-  { MaybeObject* maybe_code = ComputeCallNormal(argc, kind, extra_ic_state);
-    if (!maybe_code->ToObject(&code)) return maybe_code;
-  }
-  return code;
-}
-
-
-MaybeObject* StubCache::ComputeCallGlobal(int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state,
-                                          String* name,
-                                          JSObject* receiver,
-                                          GlobalObject* holder,
-                                          JSGlobalPropertyCell* cell,
-                                          JSFunction* function) {
+                                          Code::ExtraICState extra_state,
+                                          Handle<String> name,
+                                          Handle<JSObject> receiver,
+                                          Handle<GlobalObject> holder,
+                                          Handle<JSGlobalPropertyCell> cell,
+                                          Handle<JSFunction> function) {
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(receiver, holder);
-  JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
-  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
-                                                    NORMAL,
-                                                    extra_ic_state,
-                                                    cache_holder,
-                                                    argc);
-  Object* code = map_holder->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    // If the function hasn't been compiled yet, we cannot do it now
-    // because it may cause GC. To avoid this issue, we return an
-    // internal error which will make sure we do not update any
-    // caches.
-    if (!function->is_compiled()) return Failure::InternalError();
-    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
-    { MaybeObject* maybe_code =
-          compiler.CompileCallGlobal(receiver, holder, cell, function, name);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    ASSERT_EQ(flags, Code::cast(code)->flags());
-    PROFILE(isolate(),
-            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
-                            Code::cast(code), name));
-    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
-    Object* result;
-    { MaybeObject* maybe_result =
-          map_holder->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, NORMAL, extra_state,
+                                    cache_holder, argc);
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+  Handle<Code> code =
+      compiler.CompileCallGlobal(receiver, holder, cell, function, name);
+  ASSERT_EQ(flags, code->flags());
+  PROFILE(isolate(),
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
 
-static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
-  // Use raw_unchecked... so we don't get assert failures during GC.
-  UnseededNumberDictionary* dictionary =
-      isolate->heap()->raw_unchecked_non_monomorphic_cache();
-  int entry = dictionary->FindEntry(isolate, flags);
-  if (entry != -1) return dictionary->ValueAt(entry);
-  return isolate->heap()->raw_unchecked_undefined_value();
-}
-
-
-MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
-                                               Code::Flags flags) {
-  Heap* heap = isolate->heap();
-  Object* probe = GetProbeValue(isolate, flags);
-  if (probe != heap->undefined_value()) return probe;
-  // Seed the cache with an undefined value to make sure that any
-  // generated code object can always be inserted into the cache
-  // without causing  allocation failures.
-  Object* result;
-  { MaybeObject* maybe_result =
-        heap->non_monomorphic_cache()->AtNumberPut(flags,
-                                                   heap->undefined_value());
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  heap->public_set_non_monomorphic_cache(
-      UnseededNumberDictionary::cast(result));
-  return probe;
-}
-
-
-static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
-  Object* code;
-  if (maybe_code->ToObject(&code)) {
-    if (code->IsCode()) {
-      Heap* heap = isolate->heap();
-      int entry = heap->non_monomorphic_cache()->FindEntry(
-          Code::cast(code)->flags());
-      // The entry must be present see comment in ProbeCache.
-      ASSERT(entry != -1);
-      ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
-             heap->undefined_value());
-      heap->non_monomorphic_cache()->ValueAtPut(entry, code);
-      CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
-    }
-  }
-  return maybe_code;
+static void FillCache(Isolate* isolate, Handle<Code> code) {
+  Handle<NumberDictionary> dictionary =
+      NumberDictionarySet(isolate->factory()->non_monomorphic_cache(),
+                          code->flags(),
+                          code,
+                          PropertyDetails(NONE, NORMAL));
+  isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
 }
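// With handles, FillCache can allocate freely while inserting, so the old
// GetProbeValue/ProbeCache seeding dance (pre-storing undefined so a later
// insert could never fail) is no longer needed. A minimal standalone sketch
// of the flags -> code memo cache, with hypothetical types rather than the
// real V8 API:

#include <cstdint>
#include <memory>
#include <unordered_map>

struct Code { uint32_t flags; };  // stand-in for a compiled stub

class NonMonomorphicCache {
 public:
  const Code* Find(uint32_t flags) const {
    auto it = cache_.find(flags);
    return it == cache_.end() ? nullptr : it->second.get();
  }
  // Mirrors FillCache: store the freshly compiled stub under its flags.
  const Code* Fill(uint32_t flags, std::unique_ptr<Code> code) {
    return cache_.emplace(flags, std::move(code)).first->second.get();
  }
 private:
  std::unordered_map<uint32_t, std::unique_ptr<Code>> cache_;
};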
 
 
@@ -913,209 +693,192 @@
   Code::ExtraICState extra_state =
       CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
       CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         UNINITIALIZED,
-                                         extra_state,
-                                         NORMAL,
-                                         argc);
-  Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
-  ASSERT(result != heap()->undefined_value());
+  Code::Flags flags =
+      Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+
+  // Use raw_unchecked... so we don't get assert failures during GC.
+  NumberDictionary* dictionary =
+      isolate()->heap()->raw_unchecked_non_monomorphic_cache();
+  int entry = dictionary->FindEntry(isolate(), flags);
+  ASSERT(entry != -1);
+  Object* code = dictionary->ValueAt(entry);
   // This might be called during the marking phase of the collector,
   // hence the unchecked cast.
-  return reinterpret_cast<Code*>(result);
+  return reinterpret_cast<Code*>(code);
 }
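// FindCallInitialize can run during marking, so it probes through the
// raw_unchecked accessor and relies on ComputeCallInitialize having filled
// the entry earlier. A sketch of that invariant, reusing the cache model
// above (hypothetical helper, not V8 code):

#include <cassert>

const Code* FindPrefilled(const NonMonomorphicCache& cache, uint32_t flags) {
  const Code* code = cache.Find(flags);
  assert(code != nullptr);  // mirrors ASSERT(entry != -1): a miss is a bug
  return code;
}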
 
 
-MaybeObject* StubCache::ComputeCallInitialize(int argc,
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
                                               RelocInfo::Mode mode,
                                               Code::Kind kind) {
   Code::ExtraICState extra_state =
       CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
       CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         UNINITIALIZED,
-                                         extra_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallInitialize(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallInitialize(flags);
+  FillCache(isolate_, code);
+  return code;
 }
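// Each handlified Compute* helper below now has the same shape: probe the
// cache by flags, compile on a miss, fill, return. End-to-end sketch using
// the model above; the "compile" step is a hypothetical placeholder:

const Code* ComputeStub(NonMonomorphicCache& cache, uint32_t flags) {
  if (const Code* hit = cache.Find(flags)) return hit;  // fast path
  std::unique_ptr<Code> code(new Code{flags});          // "compile" the stub
  return cache.Fill(flags, std::move(code));            // memoize and return
}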
 
 
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
-                                              RelocInfo::Mode mode) {
-  CALL_HEAP_FUNCTION(isolate_,
-                     ComputeCallInitialize(argc, mode, Code::CALL_IC),
-                     Code);
+Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
+  return ComputeCallInitialize(argc, mode, Code::CALL_IC);
 }
 
 
 Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
-  CALL_HEAP_FUNCTION(
-      isolate_,
-      ComputeCallInitialize(argc, RelocInfo::CODE_TARGET, Code::KEYED_CALL_IC),
-      Code);
+  return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
+                               Code::KEYED_CALL_IC);
 }
 
 
-MaybeObject* StubCache::ComputeCallPreMonomorphic(
+Handle<Code> StubCache::ComputeCallPreMonomorphic(
     int argc,
     Code::Kind kind,
-    Code::ExtraICState extra_ic_state) {
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         PREMONOMORPHIC,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
+    Code::ExtraICState extra_state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallNormal(int argc,
+Handle<Code> StubCache::ComputeCallNormal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MONOMORPHIC,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallNormal(flags));
+                                          Code::ExtraICState extra_state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallNormal(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
   ASSERT(kind == Code::KEYED_CALL_IC);
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MEGAMORPHIC,
-                                         Code::kNoExtraICState,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallArguments(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
+                         NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallArguments(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallMegamorphic(
+Handle<Code> StubCache::ComputeCallMegamorphic(
     int argc,
     Code::Kind kind,
-    Code::ExtraICState extra_ic_state) {
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MEGAMORPHIC,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
+    Code::ExtraICState extra_state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
+                         NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallMegamorphic(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallMiss(int argc,
+Handle<Code> StubCache::ComputeCallMiss(int argc,
                                         Code::Kind kind,
-                                        Code::ExtraICState extra_ic_state) {
+                                        Code::ExtraICState extra_state) {
   // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
   // and monomorphic stubs are not mixed up together in the stub cache.
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         MONOMORPHIC_PROTOTYPE_FAILURE,
-                                         extra_ic_state,
-                                         NORMAL,
-                                         argc,
-                                         OWN_MAP);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallMiss(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
+                         NORMAL, argc, OWN_MAP);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallMiss(flags);
+  FillCache(isolate_, code);
+  return code;
 }
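// The IC state is part of the flags word that keys the cache, which is why
// tagging miss stubs with MONOMORPHIC_PROTOTYPE_FAILURE keeps them from
// colliding with genuine monomorphic stubs. Sketch with a hypothetical
// flags encoding (the real layout lives in Code::ComputeFlags):

#include <cstdint>

enum State : uint32_t { MONOMORPHIC = 1, MONOMORPHIC_PROTOTYPE_FAILURE = 2 };

constexpr uint32_t EncodeFlags(uint32_t kind, State state, uint32_t argc) {
  return (kind << 16) | (static_cast<uint32_t>(state) << 8) | argc;
}

static_assert(EncodeFlags(1, MONOMORPHIC, 2) !=
              EncodeFlags(1, MONOMORPHIC_PROTOTYPE_FAILURE, 2),
              "same kind and argc, different state: distinct cache keys");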
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(
-    int argc,
-    Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
+                                              Code::Kind kind) {
   // Extra IC state is irrelevant for debug break ICs. They jump to
   // the actual call ic to carry out the work.
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         DEBUG_BREAK,
-                                         Code::kNoExtraICState,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
+                         NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallDebugBreak(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 
 
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
-    int argc,
-    Code::Kind kind) {
+Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
+                                                      Code::Kind kind) {
   // Extra IC state is irrelevant for debug break ICs. They jump to
   // the actual call ic to carry out the work.
-  Code::Flags flags = Code::ComputeFlags(kind,
-                                         DEBUG_PREPARE_STEP_IN,
-                                         Code::kNoExtraICState,
-                                         NORMAL,
-                                         argc);
-  Object* probe;
-  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
+  Code::Flags flags =
+      Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
+                         NORMAL, argc);
+  Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate_, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  StubCompiler compiler(isolate_);
+  Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
+  FillCache(isolate_, code);
+  return code;
 }
 #endif
 
 
 void StubCache::Clear() {
+  Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
   for (int i = 0; i < kPrimaryTableSize; i++) {
     primary_[i].key = heap()->empty_string();
-    primary_[i].value = isolate_->builtins()->builtin(
-        Builtins::kIllegal);
+    primary_[i].value = empty;
   }
   for (int j = 0; j < kSecondaryTableSize; j++) {
     secondary_[j].key = heap()->empty_string();
-    secondary_[j].value = isolate_->builtins()->builtin(
-        Builtins::kIllegal);
+    secondary_[j].value = empty;
   }
 }
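// Clear now hoists the Builtins::kIllegal lookup out of both loops: one
// lookup instead of one per table entry. The pattern in isolation, reusing
// the Code stand-in above (hypothetical table type):

struct TableEntry { const char* key; const Code* value; };

void ClearTable(TableEntry* table, int size,
                const char* empty_key, const Code* empty_value) {
  for (int i = 0; i < size; i++) {  // loop-invariant values computed once
    table[i].key = empty_key;
    table[i].value = empty_value;
  }
}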
 
 
 void StubCache::CollectMatchingMaps(SmallMapList* types,
                                     String* name,
-                                    Code::Flags flags) {
+                                    Code::Flags flags,
+                                    Handle<Context> global_context) {
   for (int i = 0; i < kPrimaryTableSize; i++) {
     if (primary_[i].key == name) {
       Map* map = primary_[i].value->FindFirstMap();
@@ -1124,7 +887,8 @@
       if (map == NULL) continue;
 
       int offset = PrimaryOffset(name, flags, map);
-      if (entry(primary_, offset) == &primary_[i]) {
+      if (entry(primary_, offset) == &primary_[i] &&
+          !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
         types->Add(Handle<Map>(map));
       }
     }
@@ -1147,7 +911,8 @@
 
       // Lookup in secondary table and add matches.
       int offset = SecondaryOffset(name, flags, primary_offset);
-      if (entry(secondary_, offset) == &secondary_[i]) {
+      if (entry(secondary_, offset) == &secondary_[i] &&
+          !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
         types->Add(Handle<Map>(map));
       }
     }
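// A probed entry only counts as a hit if hashing its own (name, flags, map)
// lands back on its slot; the new global_context parameter additionally
// rejects maps that could retain objects from a foreign context. Sketch of
// the self-validating probe (hypothetical types and hash function):

#include <cstddef>
#include <cstdint>

struct TableSlot { const char* key; uint32_t flags; const void* map; };
typedef size_t (*HashFn)(const TableSlot&);

bool IsGenuineHit(const TableSlot* table, size_t size, size_t i, HashFn hash,
                  bool can_retain_other_context) {
  size_t offset = hash(table[i]) % size;  // recompute the slot for this entry
  return &table[offset] == &table[i] && !can_retain_other_context;
}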
@@ -1342,8 +1107,8 @@
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
   Object* value = args[2];
+  ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
   StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
-  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
   ASSERT(recv->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
   MaybeObject* result = recv->SetPropertyWithInterceptor(
@@ -1360,62 +1125,47 @@
 }
 
 
-MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
+    CallIC::GenerateInitialize(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallInitialize");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
   isolate()->counters()->call_initialize_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   // The code of the PreMonomorphic stub is the same as the code
   // of the Initialized stub.  They just differ on the code object flags.
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
+    CallIC::GenerateInitialize(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
   isolate()->counters()->call_premonomorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
+  return code;
 }
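// The Initialize and PreMonomorphic stubs share one generated body and are
// distinguished only by the flags they are registered under, i.e. by their
// cache identity. The idea in isolation (hypothetical types):

#include <cstdint>

struct StubBody {};  // stand-in for generated machine code

struct Stub { const StubBody* body; uint32_t flags; };

Stub RegisterStub(const StubBody* shared_body, uint32_t flags) {
  return Stub{shared_body, flags};  // same body, different cache key
}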
 
 
-MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
@@ -1426,116 +1176,82 @@
   } else {
     KeyedCallIC::GenerateNormal(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
   isolate()->counters()->call_normal_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
+    CallIC::GenerateMegamorphic(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateMegamorphic(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallMegamorphic");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
   isolate()->counters()->call_megamorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
-  Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallArguments");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
   PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
-  return result;
+          CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
+                                          CALL_MEGAMORPHIC_TAG),
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc, extra_ic_state);
+    CallIC::GenerateMiss(masm(), argc, extra_state);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
   isolate()->counters()->call_megamorphic_stubs()->Increment();
-  Code* code = Code::cast(result);
-  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
-                          code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
-  return result;
+                          *code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
+  return code;
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
-  HandleScope scope(isolate());
+Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
   Debug::GenerateCallICDebugBreak(masm());
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallDebugBreak");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
-  Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  USE(kind);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
   PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
-                          code, code->arguments_count()));
-  return result;
+          CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
+                                          CALL_DEBUG_BREAK_TAG),
+                          *code, code->arguments_count()));
+  return code;
 }
 
 
-MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
-  HandleScope scope(isolate());
-  // Use the same code for the the step in preparations as we do for
-  // the miss case.
+Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
+  // Use the same code for the step-in preparations as we do for the
+  // miss case.
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
@@ -1544,133 +1260,94 @@
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
-  Object* result;
-  { MaybeObject* maybe_result =
-        GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
   PROFILE(isolate(),
           CodeCreateEvent(
               CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
-              code,
+              *code,
               code->arguments_count()));
-  return result;
+  return code;
 }
-#endif
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 #undef CALL_LOGGER_TAG
 
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags,
-                                            const char* name) {
-  // Check for allocation failures during stub compilation.
-  if (failure_->IsFailure()) return failure_;
 
+Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+                                            const char* name) {
   // Create code object in the heap.
   CodeDesc desc;
   masm_.GetCode(&desc);
-  MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
+  Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
 #ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code_stubs && !result->IsFailure()) {
-    Code::cast(result->ToObjectUnchecked())->Disassemble(name);
-  }
+  if (FLAG_print_code_stubs) code->Disassemble(name);
 #endif
-  return result;
+  return code;
 }
 
 
-MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
-  if (FLAG_print_code_stubs && (name != NULL)) {
-    return GetCodeWithFlags(flags, *name->ToCString());
-  }
-  return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+                                            Handle<String> name) {
+  return (FLAG_print_code_stubs && !name.is_null())
+      ? GetCodeWithFlags(flags, *name->ToCString())
+      : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
 }
 
 
-void StubCompiler::LookupPostInterceptor(JSObject* holder,
-                                         String* name,
+void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
+                                         Handle<String> name,
                                          LookupResult* lookup) {
-  holder->LocalLookupRealNamedProperty(name, lookup);
-  if (!lookup->IsProperty()) {
-    lookup->NotFound();
-    Object* proto = holder->GetPrototype();
-    if (!proto->IsNull()) {
-      proto->Lookup(name, lookup);
-    }
-  }
+  holder->LocalLookupRealNamedProperty(*name, lookup);
+  if (lookup->IsProperty()) return;
+
+  lookup->NotFound();
+  if (holder->GetPrototype()->IsNull()) return;
+
+  holder->GetPrototype()->Lookup(*name, lookup);
 }
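// LookupPostInterceptor checks the holder's own ("real named") properties
// first and only then searches the prototype chain. A standalone model of
// that order, with hypothetical types:

#include <map>
#include <string>

struct Obj {
  std::map<std::string, int> own;
  const Obj* prototype = nullptr;
};

const int* ChainLookup(const Obj* o, const std::string& name) {
  for (; o != nullptr; o = o->prototype) {  // walk the prototype chain
    auto it = o->own.find(name);
    if (it != o->own.end()) return &it->second;
  }
  return nullptr;
}

const int* LookupPostInterceptor(const Obj& holder, const std::string& name) {
  auto it = holder.own.find(name);  // local lookup on the holder first
  if (it != holder.own.end()) return &it->second;
  return ChainLookup(holder.prototype, name);  // then the prototype chain
}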
 
 
-
-MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::LOAD_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  return code;
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
-                                            String* name,
+Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
+                                            Handle<String> name,
                                             InlineCacheState state) {
   Code::Flags flags = Code::ComputeFlags(
       Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+  return code;
 }
 
 
-MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
+                                        Handle<String> name) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::STORE_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::STORE_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+  return code;
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
-                                             String* name,
+Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
+                                             Handle<String> name,
                                              InlineCacheState state) {
   Code::Flags flags =
       Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type);
-  MaybeObject* result = GetCodeWithFlags(flags, name);
-  if (!result->IsFailure()) {
-    PROFILE(isolate(),
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
-                            Code::cast(result->ToObjectUnchecked()),
-                            name));
-    GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
-                   name,
-                   Code::cast(result->ToObjectUnchecked())));
-  }
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
+  GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+  return code;
 }
 
 
@@ -1680,50 +1357,49 @@
 }
 
 
-CallStubCompiler::CallStubCompiler(int argc,
+CallStubCompiler::CallStubCompiler(Isolate* isolate,
+                                   int argc,
                                    Code::Kind kind,
-                                   Code::ExtraICState extra_ic_state,
+                                   Code::ExtraICState extra_state,
                                    InlineCacheHolderFlag cache_holder)
-    : arguments_(argc),
+    : StubCompiler(isolate),
+      arguments_(argc),
       kind_(kind),
-      extra_ic_state_(extra_ic_state),
+      extra_state_(extra_state),
       cache_holder_(cache_holder) {
 }
 
 
-bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
-  SharedFunctionInfo* info = function->shared();
-  if (info->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = info->builtin_function_id();
+bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
 #define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
   }
+
   CallOptimization optimization(function);
-  if (optimization.is_simple_api_call()) {
-    return true;
-  }
-  return false;
+  return optimization.is_simple_api_call();
 }
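// HasCustomCallGenerator expands CUSTOM_CALL_IC_GENERATORS, an X-macro over
// the builtin ids that have hand-written call stubs. The dispatch pattern
// with a made-up id list (the real list lives in the V8 sources):

#include <cstdint>

enum BuiltinFunctionId : uint32_t { kMathAbs, kMathFloor, kStringCharAt };

#define CUSTOM_GENERATORS(V) V(MathAbs) V(MathFloor) V(StringCharAt)

bool HasCustomGenerator(BuiltinFunctionId id) {
#define CASE(name) if (id == k##name) return true;
  CUSTOM_GENERATORS(CASE)
#undef CASE
  return false;
}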
 
 
-MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
-                                                 JSObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* fname) {
+Handle<Code> CallStubCompiler::CompileCustomCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> fname) {
   ASSERT(HasCustomCallGenerator(function));
 
-  SharedFunctionInfo* info = function->shared();
-  if (info->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = info->builtin_function_id();
-#define CALL_GENERATOR_CASE(name)                           \
-    if (id == k##name) {                                    \
-      return CallStubCompiler::Compile##name##Call(object,  \
-                                                  holder,   \
-                                                  cell,     \
-                                                  function, \
-                                                  fname);   \
+  if (function->shared()->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function->shared()->builtin_function_id();
+#define CALL_GENERATOR_CASE(name)                               \
+    if (id == k##name) {                                        \
+      return CallStubCompiler::Compile##name##Call(object,      \
+                                                   holder,      \
+                                                   cell,        \
+                                                   function,    \
+                                                   fname);      \
     }
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
@@ -1739,100 +1415,99 @@
 }
 
 
-MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
+Handle<Code> CallStubCompiler::GetCode(PropertyType type, Handle<String> name) {
   int argc = arguments_.immediate();
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
                                                     type,
-                                                    extra_ic_state_,
+                                                    extra_state_,
                                                     cache_holder_,
                                                     argc);
   return GetCodeWithFlags(flags, name);
 }
 
 
-MaybeObject* CallStubCompiler::GetCode(JSFunction* function) {
-  String* function_name = NULL;
+Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
+  Handle<String> function_name;
   if (function->shared()->name()->IsString()) {
-    function_name = String::cast(function->shared()->name());
+    function_name = Handle<String>(String::cast(function->shared()->name()));
   }
   return GetCode(CONSTANT_FUNCTION, function_name);
 }
 
 
-MaybeObject* ConstructStubCompiler::GetCode() {
+Handle<Code> ConstructStubCompiler::GetCode() {
   Code::Flags flags = Code::ComputeFlags(Code::STUB);
-  Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ConstructStub");
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  Code* code = Code::cast(result);
-  USE(code);
-  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
-  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
-  return result;
+  Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
+  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
+  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
+  return code;
 }
 
 
 CallOptimization::CallOptimization(LookupResult* lookup) {
-  if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+  if (!lookup->IsProperty() ||
+      !lookup->IsCacheable() ||
       lookup->type() != CONSTANT_FUNCTION) {
-    Initialize(NULL);
+    Initialize(Handle<JSFunction>::null());
   } else {
     // We only optimize constant function calls.
-    Initialize(lookup->GetConstantFunction());
+    Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
   }
 }
 
-CallOptimization::CallOptimization(JSFunction* function) {
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
   Initialize(function);
 }
 
 
-int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
-                                                      JSObject* holder) const {
-  ASSERT(is_simple_api_call_);
-  if (expected_receiver_type_ == NULL) return 0;
+int CallOptimization::GetPrototypeDepthOfExpectedType(
+    Handle<JSObject> object,
+    Handle<JSObject> holder) const {
+  ASSERT(is_simple_api_call());
+  if (expected_receiver_type_.is_null()) return 0;
   int depth = 0;
-  while (object != holder) {
-    if (object->IsInstanceOf(expected_receiver_type_)) return depth;
-    object = JSObject::cast(object->GetPrototype());
+  while (!object.is_identical_to(holder)) {
+    if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
+    object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
     ++depth;
   }
-  if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+  if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
   return kInvalidProtoDepth;
 }
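// GetPrototypeDepthOfExpectedType counts prototype hops from the receiver
// toward the holder until an object satisfies the expected receiver type.
// Sketch reusing the Obj model above, with a hypothetical predicate:

const int kInvalidProtoDepth = -1;

int PrototypeDepth(const Obj* object, const Obj* holder,
                   bool (*matches)(const Obj*)) {
  int depth = 0;
  for (; object != holder; object = object->prototype, ++depth) {
    if (matches(object)) return depth;  // satisfied along the chain
  }
  return matches(holder) ? depth : kInvalidProtoDepth;
}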
 
 
-void CallOptimization::Initialize(JSFunction* function) {
-  constant_function_ = NULL;
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+  constant_function_ = Handle<JSFunction>::null();
   is_simple_api_call_ = false;
-  expected_receiver_type_ = NULL;
-  api_call_info_ = NULL;
+  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+  api_call_info_ = Handle<CallHandlerInfo>::null();
 
-  if (function == NULL || !function->is_compiled()) return;
+  if (function.is_null() || !function->is_compiled()) return;
 
   constant_function_ = function;
   AnalyzePossibleApiFunction(function);
 }
 
 
-void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
-  SharedFunctionInfo* sfi = function->shared();
-  if (!sfi->IsApiFunction()) return;
-  FunctionTemplateInfo* info = sfi->get_api_func_data();
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+  if (!function->shared()->IsApiFunction()) return;
+  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
 
   // Require a C++ callback.
   if (info->call_code()->IsUndefined()) return;
-  api_call_info_ = CallHandlerInfo::cast(info->call_code());
+  api_call_info_ =
+      Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
 
   // Accept signatures that either have no restrictions at all or
   // only have restrictions on the receiver.
   if (!info->signature()->IsUndefined()) {
-    SignatureInfo* signature = SignatureInfo::cast(info->signature());
+    Handle<SignatureInfo> signature =
+        Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
     if (!signature->args()->IsUndefined()) return;
     if (!signature->receiver()->IsUndefined()) {
       expected_receiver_type_ =
-          FunctionTemplateInfo::cast(signature->receiver());
+          Handle<FunctionTemplateInfo>(
+              FunctionTemplateInfo::cast(signature->receiver()));
     }
   }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 18c157b..0843925 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -30,6 +30,7 @@
 
 #include "allocation.h"
 #include "arguments.h"
+#include "ic-inl.h"
 #include "macro-assembler.h"
 #include "objects.h"
 #include "zone-inl.h"
@@ -75,207 +76,167 @@
 
   // Computes the right stub matching. Inserts the result in the
   // cache before returning.  This might compile a stub if needed.
-  MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
-      String* name,
-      JSObject* receiver);
+  Handle<Code> ComputeLoadNonexistent(Handle<String> name,
+                                      Handle<JSObject> receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
-                                                JSObject* receiver,
-                                                JSObject* holder,
-                                                int field_index);
+  Handle<Code> ComputeLoadField(Handle<String> name,
+                                Handle<JSObject> receiver,
+                                Handle<JSObject> holder,
+                                int field_index);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder,
-      AccessorInfo* callback);
+  Handle<Code> ComputeLoadCallback(Handle<String> name,
+                                   Handle<JSObject> receiver,
+                                   Handle<JSObject> holder,
+                                   Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
-                                                   JSObject* receiver,
-                                                   JSObject* holder,
-                                                   Object* value);
+  Handle<Code> ComputeLoadConstant(Handle<String> name,
+                                   Handle<JSObject> receiver,
+                                   Handle<JSObject> holder,
+                                   Handle<Object> value);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder);
+  Handle<Code> ComputeLoadInterceptor(Handle<String> name,
+                                      Handle<JSObject> receiver,
+                                      Handle<JSObject> holder);
 
-  MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
+  Handle<Code> ComputeLoadNormal();
 
-
-  MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
-      String* name,
-      JSObject* receiver,
-      GlobalObject* holder,
-      JSGlobalPropertyCell* cell,
-      bool is_dont_delete);
-
+  Handle<Code> ComputeLoadGlobal(Handle<String> name,
+                                 Handle<JSObject> receiver,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 bool is_dont_delete);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
-                                                     int field_index);
+  Handle<Code> ComputeKeyedLoadField(Handle<String> name,
+                                     Handle<JSObject> receiver,
+                                     Handle<JSObject> holder,
+                                     int field_index);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder,
-      AccessorInfo* callback);
+  Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
+                                        Handle<JSObject> receiver,
+                                        Handle<JSObject> holder,
+                                        Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder,
-      Object* value);
+  Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
+                                        Handle<JSObject> receiver,
+                                        Handle<JSObject> holder,
+                                        Handle<Object> value);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
-      String* name,
-      JSObject* receiver,
-      JSObject* holder);
+  Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
+                                           Handle<JSObject> receiver,
+                                           Handle<JSObject> holder);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
-      String* name,
-      JSArray* receiver);
+  Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name,
+                                           Handle<JSArray> receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
-      String* name,
-      String* receiver);
+  Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name,
+                                            Handle<String> receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
-      String* name,
-      JSFunction* receiver);
+  Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name,
+                                                 Handle<JSFunction> receiver);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreField(
-      String* name,
-      JSObject* receiver,
-      int field_index,
-      Map* transition,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreField(Handle<String> name,
+                                 Handle<JSObject> receiver,
+                                 int field_index,
+                                 Handle<Map> transition,
+                                 StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
-      String* name,
-      GlobalObject* receiver,
-      JSGlobalPropertyCell* cell,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreGlobal(Handle<String> name,
+                                  Handle<GlobalObject> receiver,
+                                  Handle<JSGlobalPropertyCell> cell,
+                                  StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
-      String* name,
-      JSObject* receiver,
-      AccessorInfo* callback,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreCallback(Handle<String> name,
+                                    Handle<JSObject> receiver,
+                                    Handle<AccessorInfo> callback,
+                                    StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
-      String* name,
-      JSObject* receiver,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeStoreInterceptor(Handle<String> name,
+                                       Handle<JSObject> receiver,
+                                       StrictModeFlag strict_mode);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
-      String* name,
-      JSObject* receiver,
-      int field_index,
-      Map* transition,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeKeyedStoreField(Handle<String> name,
+                                      Handle<JSObject> receiver,
+                                      int field_index,
+                                      Handle<Map> transition,
+                                      StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
-      JSObject* receiver,
-      bool is_store,
-      StrictModeFlag strict_mode);
+  Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<JSObject> receiver,
+                                              KeyedIC::StubKind stub_kind,
+                                              StrictModeFlag strict_mode);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeCallField(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      Object* object,
-      JSObject* holder,
-      int index);
+  Handle<Code> ComputeCallField(int argc,
+                                Code::Kind,
+                                Code::ExtraICState extra_state,
+                                Handle<String> name,
+                                Handle<Object> object,
+                                Handle<JSObject> holder,
+                                int index);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallConstant(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      Object* object,
-      JSObject* holder,
-      JSFunction* function);
+  Handle<Code> ComputeCallConstant(int argc,
+                                   Code::Kind,
+                                   Code::ExtraICState extra_state,
+                                   Handle<String> name,
+                                   Handle<Object> object,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> function);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallNormal(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      JSObject* receiver);
+  Handle<Code> ComputeCallInterceptor(int argc,
+                                      Code::Kind,
+                                      Code::ExtraICState extra_state,
+                                      Handle<String> name,
+                                      Handle<Object> object,
+                                      Handle<JSObject> holder);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      Object* object,
-      JSObject* holder);
-
-  MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
-      int argc,
-      Code::Kind,
-      Code::ExtraICState extra_ic_state,
-      String* name,
-      JSObject* receiver,
-      GlobalObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function);
+  Handle<Code> ComputeCallGlobal(int argc,
+                                 Code::Kind,
+                                 Code::ExtraICState extra_state,
+                                 Handle<String> name,
+                                 Handle<JSObject> receiver,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function);
 
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
-                                                     RelocInfo::Mode mode,
-                                                     Code::Kind kind);
-
-  Handle<Code> ComputeCallInitialize(int argc,
-                                     RelocInfo::Mode mode);
+  Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
 
   Handle<Code> ComputeKeyedCallInitialize(int argc);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
-      int argc,
-      Code::Kind kind,
-      Code::ExtraICState extra_ic_state);
+  Handle<Code> ComputeCallPreMonomorphic(int argc,
+                                         Code::Kind kind,
+                                         Code::ExtraICState extra_state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
-                                                 Code::Kind kind,
-                                                 Code::ExtraICState state);
+  Handle<Code> ComputeCallNormal(int argc,
+                                 Code::Kind kind,
+                                 Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
-                                                    Code::Kind kind);
+  Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
-                                                      Code::Kind kind,
-                                                      Code::ExtraICState state);
+  Handle<Code> ComputeCallMegamorphic(int argc,
+                                      Code::Kind kind,
+                                      Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState state);
+  Handle<Code> ComputeCallMiss(int argc,
+                               Code::Kind kind,
+                               Code::ExtraICState state);
 
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
-  MUST_USE_RESULT Code* FindCallInitialize(int argc,
-                                           RelocInfo::Mode mode,
-                                           Code::Kind kind);
+  Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
+  Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
-                                                             Code::Kind kind);
+  Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
 #endif
 
   // Update cache for entry hash(name, map).
@@ -287,7 +248,8 @@
   // Collect all maps that match the name and flags.
   void CollectMatchingMaps(SmallMapList* types,
                            String* name,
-                           Code::Flags flags);
+                           Code::Flags flags,
+                           Handle<Context> global_context);
 
   // Generate code for probing the stub cache table.
   // Arguments extra and extra2 may be used to pass additional scratch
@@ -329,16 +291,14 @@
 
   Isolate* isolate() { return isolate_; }
   Heap* heap() { return isolate()->heap(); }
+  Factory* factory() { return isolate()->factory(); }
 
  private:
   explicit StubCache(Isolate* isolate);
 
-  friend class Isolate;
-  friend class SCTableReference;
-  static const int kPrimaryTableSize = 2048;
-  static const int kSecondaryTableSize = 512;
-  Entry primary_[kPrimaryTableSize];
-  Entry secondary_[kSecondaryTableSize];
+  Handle<Code> ComputeCallInitialize(int argc,
+                                     RelocInfo::Mode mode,
+                                     Code::Kind kind);
 
   // Computes the hashed offsets for primary and secondary caches.
   static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
@@ -383,8 +343,16 @@
         reinterpret_cast<Address>(table) + (offset << shift_amount));
   }
 
+  static const int kPrimaryTableSize = 2048;
+  static const int kSecondaryTableSize = 512;
+
+  Entry primary_[kPrimaryTableSize];
+  Entry secondary_[kSecondaryTableSize];
   Isolate* isolate_;
 
+  friend class Isolate;
+  friend class SCTableReference;
+
   DISALLOW_COPY_AND_ASSIGN(StubCache);
 };
 
@@ -406,21 +374,24 @@
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
 
 
-// The stub compiler compiles stubs for the stub cache.
+// The stub compilers compile stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  StubCompiler()
-      : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
+  explicit StubCompiler(Isolate* isolate)
+      : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
 
-  MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
+  // Functions to compile either CallIC or KeyedCallIC.  The specific kind
+  // is extracted from the code flags.
+  Handle<Code> CompileCallInitialize(Code::Flags flags);
+  Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
+  Handle<Code> CompileCallNormal(Code::Flags flags);
+  Handle<Code> CompileCallMegamorphic(Code::Flags flags);
+  Handle<Code> CompileCallArguments(Code::Flags flags);
+  Handle<Code> CompileCallMiss(Code::Flags flags);
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
-  MUST_USE_RESULT MaybeObject* CompileCallDebugPrepareStepIn(Code::Flags flags);
+  Handle<Code> CompileCallDebugBreak(Code::Flags flags);
+  Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
 #endif
 
   // Static functions for generating parts of stubs.
@@ -440,8 +411,10 @@
                                                         Label* miss);
 
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
-                                       Register dst, Register src,
-                                       JSObject* holder, int index);
+                                       Register dst,
+                                       Register src,
+                                       Handle<JSObject> holder,
+                                       int index);
 
   static void GenerateLoadArrayLength(MacroAssembler* masm,
                                       Register receiver,
@@ -462,9 +435,9 @@
                                             Label* miss_label);
 
   static void GenerateStoreField(MacroAssembler* masm,
-                                 JSObject* object,
+                                 Handle<JSObject> object,
                                  int index,
-                                 Map* transition,
+                                 Handle<Map> transition,
                                  Register receiver_reg,
                                  Register name_reg,
                                  Register scratch,
@@ -490,88 +463,87 @@
   // The function can optionally (when save_at_depth !=
   // kInvalidProtoDepth) save the object at the given depth by moving
   // it to [esp + kPointerSize].
-
-  Register CheckPrototypes(JSObject* object,
+  Register CheckPrototypes(Handle<JSObject> object,
                            Register object_reg,
-                           JSObject* holder,
+                           Handle<JSObject> holder,
                            Register holder_reg,
                            Register scratch1,
                            Register scratch2,
-                           String* name,
+                           Handle<String> name,
                            Label* miss) {
     return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
                            scratch2, name, kInvalidProtoDepth, miss);
   }
 
-  Register CheckPrototypes(JSObject* object,
+  Register CheckPrototypes(Handle<JSObject> object,
                            Register object_reg,
-                           JSObject* holder,
+                           Handle<JSObject> holder,
                            Register holder_reg,
                            Register scratch1,
                            Register scratch2,
-                           String* name,
+                           Handle<String> name,
                            int save_at_depth,
                            Label* miss);
 
  protected:
-  MaybeObject* GetCodeWithFlags(Code::Flags flags, const char* name);
-  MaybeObject* GetCodeWithFlags(Code::Flags flags, String* name);
+  Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
+  Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
 
   MacroAssembler* masm() { return &masm_; }
   void set_failure(Failure* failure) { failure_ = failure; }
 
-  void GenerateLoadField(JSObject* object,
-                         JSObject* holder,
+  void GenerateLoadField(Handle<JSObject> object,
+                         Handle<JSObject> holder,
                          Register receiver,
                          Register scratch1,
                          Register scratch2,
                          Register scratch3,
                          int index,
-                         String* name,
+                         Handle<String> name,
                          Label* miss);
 
-  MaybeObject* GenerateLoadCallback(JSObject* object,
-                                    JSObject* holder,
-                                    Register receiver,
-                                    Register name_reg,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    Register scratch3,
-                                    AccessorInfo* callback,
-                                    String* name,
-                                    Label* miss);
+  void GenerateLoadCallback(Handle<JSObject> object,
+                            Handle<JSObject> holder,
+                            Register receiver,
+                            Register name_reg,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            Handle<AccessorInfo> callback,
+                            Handle<String> name,
+                            Label* miss);
 
-  void GenerateLoadConstant(JSObject* object,
-                            JSObject* holder,
+  void GenerateLoadConstant(Handle<JSObject> object,
+                            Handle<JSObject> holder,
                             Register receiver,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
-                            Object* value,
-                            String* name,
+                            Handle<Object> value,
+                            Handle<String> name,
                             Label* miss);
 
-  void GenerateLoadInterceptor(JSObject* object,
-                               JSObject* holder,
+  void GenerateLoadInterceptor(Handle<JSObject> object,
+                               Handle<JSObject> holder,
                                LookupResult* lookup,
                                Register receiver,
                                Register name_reg,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
-                               String* name,
+                               Handle<String> name,
                                Label* miss);
 
-  static void LookupPostInterceptor(JSObject* holder,
-                                    String* name,
+  static void LookupPostInterceptor(Handle<JSObject> holder,
+                                    Handle<String> name,
                                     LookupResult* lookup);
 
-  Isolate* isolate() { return scope_.isolate(); }
+  Isolate* isolate() { return isolate_; }
   Heap* heap() { return isolate()->heap(); }
   Factory* factory() { return isolate()->factory(); }
 
  private:
-  HandleScope scope_;
+  Isolate* isolate_;
   MacroAssembler masm_;
   Failure* failure_;
 };
@@ -579,70 +551,75 @@
 
 class LoadStubCompiler: public StubCompiler {
  public:
-  MUST_USE_RESULT MaybeObject* CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last);
+  explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileLoadField(JSObject* object,
-                                                JSObject* holder,
-                                                int index,
-                                                String* name);
+  Handle<Code> CompileLoadNonexistent(Handle<String> name,
+                                      Handle<JSObject> object,
+                                      Handle<JSObject> last);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback);
+  Handle<Code> CompileLoadField(Handle<JSObject> object,
+                                Handle<JSObject> holder,
+                                int index,
+                                Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name);
+  Handle<Code> CompileLoadCallback(Handle<String> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
+  Handle<Code> CompileLoadConstant(Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<Object> value,
+                                   Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete);
+  Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+                                      Handle<JSObject> holder,
+                                      Handle<String> name);
+
+  Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<String> name,
+                                 bool is_dont_delete);
 
  private:
-  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+  Handle<Code> GetCode(PropertyType type, Handle<String> name);
 };
 
 
 class KeyedLoadStubCompiler: public StubCompiler {
  public:
-  MUST_USE_RESULT MaybeObject* CompileLoadField(String* name,
-                                                JSObject* object,
-                                                JSObject* holder,
-                                                int index);
+  explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback);
+  Handle<Code> CompileLoadField(Handle<String> name,
+                                Handle<JSObject> object,
+                                Handle<JSObject> holder,
+                                int index);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadConstant(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value);
+  Handle<Code> CompileLoadCallback(Handle<String> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<AccessorInfo> callback);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
+  Handle<Code> CompileLoadConstant(Handle<String> name,
+                                   Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<Object> value);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadArrayLength(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
-  MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
+  Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+                                      Handle<JSObject> holder,
+                                      Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
+  Handle<Code> CompileLoadArrayLength(Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
-      MapList* receiver_maps,
-      CodeList* handler_ics);
+  Handle<Code> CompileLoadStringLength(Handle<String> name);
+
+  Handle<Code> CompileLoadFunctionPrototype(Handle<String> name);
+
+  Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
+
+  Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
+                                      CodeHandleList* handler_ics);
 
   static void GenerateLoadExternalArray(MacroAssembler* masm,
                                         ElementsKind elements_kind);
@@ -654,34 +631,36 @@
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type,
-                       String* name,
+  Handle<Code> GetCode(PropertyType type,
+                       Handle<String> name,
                        InlineCacheState state = MONOMORPHIC);
 };
 
 
 class StoreStubCompiler: public StubCompiler {
  public:
-  explicit StoreStubCompiler(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
+  StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
+    : StubCompiler(isolate), strict_mode_(strict_mode) { }
 
-  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
-                                                 int index,
-                                                 Map* transition,
-                                                 String* name);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
-                                                    AccessorInfo* callbacks,
-                                                    String* name);
-  MUST_USE_RESULT MaybeObject* CompileStoreInterceptor(JSObject* object,
-                                                       String* name);
-  MUST_USE_RESULT MaybeObject* CompileStoreGlobal(GlobalObject* object,
-                                                  JSGlobalPropertyCell* holder,
-                                                  String* name);
+  Handle<Code> CompileStoreField(Handle<JSObject> object,
+                                 int index,
+                                 Handle<Map> transition,
+                                 Handle<String> name);
 
+  Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+                                    Handle<AccessorInfo> callback,
+                                    Handle<String> name);
+
+  Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
+                                       Handle<String> name);
+
+  Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
+                                  Handle<JSGlobalPropertyCell> holder,
+                                  Handle<String> name);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  Handle<Code> GetCode(PropertyType type, Handle<String> name);
 
   StrictModeFlag strict_mode_;
 };
@@ -689,22 +668,23 @@
 
 class KeyedStoreStubCompiler: public StubCompiler {
  public:
-  explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
+  KeyedStoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
+    : StubCompiler(isolate), strict_mode_(strict_mode) { }
 
-  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
-                                                 int index,
-                                                 Map* transition,
-                                                 String* name);
+  Handle<Code> CompileStoreField(Handle<JSObject> object,
+                                 int index,
+                                 Handle<Map> transition,
+                                 Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
+  Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
-      MapList* receiver_maps,
-      CodeList* handler_ics);
+  Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps,
+                                       CodeHandleList* handler_stubs,
+                                       MapHandleList* transitioned_maps);
 
   static void GenerateStoreFastElement(MacroAssembler* masm,
-                                       bool is_js_array);
+                                       bool is_js_array,
+                                       ElementsKind element_kind);
 
   static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
                                              bool is_js_array);
@@ -715,8 +695,8 @@
   static void GenerateStoreDictionaryElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type,
-                       String* name,
+  Handle<Code> GetCode(PropertyType type,
+                       Handle<String> name,
                        InlineCacheState state = MONOMORPHIC);
 
   StrictModeFlag strict_mode_;
@@ -739,105 +719,97 @@
 
 class CallStubCompiler: public StubCompiler {
  public:
-  CallStubCompiler(int argc,
+  CallStubCompiler(Isolate* isolate,
+                   int argc,
                    Code::Kind kind,
-                   Code::ExtraICState extra_ic_state,
+                   Code::ExtraICState extra_state,
                    InlineCacheHolderFlag cache_holder);
 
-  MUST_USE_RESULT MaybeObject* CompileCallField(
-      JSObject* object,
-      JSObject* holder,
-      int index,
-      String* name);
+  Handle<Code> CompileCallField(Handle<JSObject> object,
+                                Handle<JSObject> holder,
+                                int index,
+                                Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileCallConstant(
-      Object* object,
-      JSObject* holder,
-      JSFunction* function,
-      String* name,
-      CheckType check);
+  Handle<Code> CompileCallConstant(Handle<Object> object,
+                                   Handle<JSObject> holder,
+                                   Handle<JSFunction> function,
+                                   Handle<String> name,
+                                   CheckType check);
 
-  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
-      JSObject* object,
-      JSObject* holder,
-      String* name);
+  Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
+                                      Handle<JSObject> holder,
+                                      Handle<String> name);
 
-  MUST_USE_RESULT MaybeObject* CompileCallGlobal(
-      JSObject* object,
-      GlobalObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function,
-      String* name);
+  Handle<Code> CompileCallGlobal(Handle<JSObject> object,
+                                 Handle<GlobalObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function,
+                                 Handle<String> name);
 
-  static bool HasCustomCallGenerator(JSFunction* function);
+  static bool HasCustomCallGenerator(Handle<JSFunction> function);
 
  private:
-  // Compiles a custom call constant/global IC. For constant calls
-  // cell is NULL. Returns undefined if there is no custom call code
-  // for the given function or it can't be generated.
-  MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
-                                                 JSObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name);
+  // Compiles a custom call constant/global IC.  For constant calls, cell is
+  // NULL.  Returns an empty handle if there is no custom call code for the
+  // given function.
+  Handle<Code> CompileCustomCall(Handle<Object> object,
+                                 Handle<JSObject> holder,
+                                 Handle<JSGlobalPropertyCell> cell,
+                                 Handle<JSFunction> function,
+                                 Handle<String> name);
 
-#define DECLARE_CALL_GENERATOR(name)                                           \
-  MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object,             \
-                                                   JSObject* holder,           \
-                                                   JSGlobalPropertyCell* cell, \
-                                                   JSFunction* function,       \
-                                                   String* fname);
+#define DECLARE_CALL_GENERATOR(name)                                    \
+  Handle<Code> Compile##name##Call(Handle<Object> object,               \
+                                   Handle<JSObject> holder,             \
+                                   Handle<JSGlobalPropertyCell> cell,   \
+                                   Handle<JSFunction> function,         \
+                                   Handle<String> fname);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
 
-  MUST_USE_RESULT MaybeObject* CompileFastApiCall(
-      const CallOptimization& optimization,
-      Object* object,
-      JSObject* holder,
-      JSGlobalPropertyCell* cell,
-      JSFunction* function,
-      String* name);
+  Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
+                                  Handle<Object> object,
+                                  Handle<JSObject> holder,
+                                  Handle<JSGlobalPropertyCell> cell,
+                                  Handle<JSFunction> function,
+                                  Handle<String> name);
 
-  const ParameterCount arguments_;
-  const Code::Kind kind_;
-  const Code::ExtraICState extra_ic_state_;
-  const InlineCacheHolderFlag cache_holder_;
+  Handle<Code> GetCode(PropertyType type, Handle<String> name);
+  Handle<Code> GetCode(Handle<JSFunction> function);
 
   const ParameterCount& arguments() { return arguments_; }
 
-  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+  void GenerateNameCheck(Handle<String> name, Label* miss);
 
-  // Convenience function. Calls GetCode above passing
-  // CONSTANT_FUNCTION type and the name of the given function.
-  MUST_USE_RESULT MaybeObject* GetCode(JSFunction* function);
-
-  void GenerateNameCheck(String* name, Label* miss);
-
-  void GenerateGlobalReceiverCheck(JSObject* object,
-                                   JSObject* holder,
-                                   String* name,
+  void GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                   Handle<JSObject> holder,
+                                   Handle<String> name,
                                    Label* miss);
 
  // Generates code to load the function from the cell, checking that
   // it still contains the same function.
-  void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                    JSFunction* function,
+  void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
+                                    Handle<JSFunction> function,
                                     Label* miss);
 
-  // Generates a jump to CallIC miss stub. Returns Failure if the jump cannot
-  // be generated.
-  MUST_USE_RESULT MaybeObject* GenerateMissBranch();
+  // Generates a jump to CallIC miss stub.
+  void GenerateMissBranch();
+
+  const ParameterCount arguments_;
+  const Code::Kind kind_;
+  const Code::ExtraICState extra_state_;
+  const InlineCacheHolderFlag cache_holder_;
 };
 
 
 class ConstructStubCompiler: public StubCompiler {
  public:
-  explicit ConstructStubCompiler() {}
+  explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
 
-  MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
+  Handle<Code> CompileConstructStub(Handle<JSFunction> function);
 
  private:
-  MaybeObject* GetCode();
+  Handle<Code> GetCode();
 };
 
 
@@ -846,14 +818,14 @@
  public:
   explicit CallOptimization(LookupResult* lookup);
 
-  explicit CallOptimization(JSFunction* function);
+  explicit CallOptimization(Handle<JSFunction> function);
 
   bool is_constant_call() const {
-    return constant_function_ != NULL;
+    return !constant_function_.is_null();
   }
 
-  JSFunction* constant_function() const {
-    ASSERT(constant_function_ != NULL);
+  Handle<JSFunction> constant_function() const {
+    ASSERT(is_constant_call());
     return constant_function_;
   }
 
@@ -861,32 +833,32 @@
     return is_simple_api_call_;
   }
 
-  FunctionTemplateInfo* expected_receiver_type() const {
-    ASSERT(is_simple_api_call_);
+  Handle<FunctionTemplateInfo> expected_receiver_type() const {
+    ASSERT(is_simple_api_call());
     return expected_receiver_type_;
   }
 
-  CallHandlerInfo* api_call_info() const {
-    ASSERT(is_simple_api_call_);
+  Handle<CallHandlerInfo> api_call_info() const {
+    ASSERT(is_simple_api_call());
     return api_call_info_;
   }
 
   // Returns the depth of the object having the expected type in the
   // prototype chain between the two arguments.
-  int GetPrototypeDepthOfExpectedType(JSObject* object,
-                                      JSObject* holder) const;
+  int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
+                                      Handle<JSObject> holder) const;
 
  private:
-  void Initialize(JSFunction* function);
+  void Initialize(Handle<JSFunction> function);
 
   // Determines whether the given function can be called using the
   // fast api call builtin.
-  void AnalyzePossibleApiFunction(JSFunction* function);
+  void AnalyzePossibleApiFunction(Handle<JSFunction> function);
 
-  JSFunction* constant_function_;
+  Handle<JSFunction> constant_function_;
   bool is_simple_api_call_;
-  FunctionTemplateInfo* expected_receiver_type_;
-  CallHandlerInfo* api_call_info_;
+  Handle<FunctionTemplateInfo> expected_receiver_type_;
+  Handle<CallHandlerInfo> api_call_info_;
 };
 
 
diff --git a/src/token.h b/src/token.h
index eb825c1..7a2156c 100644
--- a/src/token.h
+++ b/src/token.h
@@ -73,6 +73,7 @@
   T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
   T(INIT_LET, "=init_let", 2)  /* AST-use only. */                      \
   T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
+  T(INIT_CONST_HARMONY, "=init_const_harmony", 2)  /* AST-use only. */  \
   T(ASSIGN, "=", 2)                                                     \
   T(ASSIGN_BIT_OR, "|=", 2)                                             \
   T(ASSIGN_BIT_XOR, "^=", 2)                                            \
@@ -216,6 +217,10 @@
     return op == LT || op == LTE || op == GT || op == GTE;
   }
 
+  static bool IsEqualityOp(Value op) {
+    return op == EQ || op == EQ_STRICT;
+  }
+
   static Value NegateCompareOp(Value op) {
     ASSERT(IsCompareOp(op));
     switch (op) {
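
Note: the new Token::IsEqualityOp mirrors the IsCompareOp-style predicates
above it. A minimal standalone rendering of the idiom; the enum values here
are illustrative, not V8's real token encoding:

#include <cstdio>

// Illustrative token values; V8's real encoding lives in the T(...) macro
// table in token.h.
enum Value { EQ, EQ_STRICT, LT, LTE, GT, GTE };

// Same shape as the new Token::IsEqualityOp predicate.
static bool IsEqualityOp(Value op) {
  return op == EQ || op == EQ_STRICT;
}

int main() {
  std::printf("EQ_STRICT: %d\n", IsEqualityOp(EQ_STRICT));  // prints 1
  std::printf("LT:        %d\n", IsEqualityOp(LT));         // prints 0
  return 0;
}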
diff --git a/src/type-info.cc b/src/type-info.cc
index 4df7ece..e5f7b3e 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -60,8 +60,10 @@
 
 
 TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
-                                       Handle<Context> global_context) {
+                                       Handle<Context> global_context,
+                                       Isolate* isolate) {
   global_context_ = global_context;
+  isolate_ = isolate;
   BuildDictionary(code);
   ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
 }
@@ -69,31 +71,32 @@
 
 Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
   int entry = dictionary_->FindEntry(ast_id);
-  return entry != UnseededNumberDictionary::kNotFound
+  return entry != NumberDictionary::kNotFound
       ? Handle<Object>(dictionary_->ValueAt(entry))
-      : Isolate::Current()->factory()->undefined_value();
+      : Handle<Object>::cast(isolate_->factory()->undefined_value());
 }
 
 
 bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     return code->is_keyed_load_stub() &&
         code->ic_state() == MONOMORPHIC &&
         Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
-        code->FindFirstMap() != NULL;
+        code->FindFirstMap() != NULL &&
+        !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Builtins* builtins = Isolate::Current()->builtins();
+    Builtins* builtins = isolate_->builtins();
     return code->is_keyed_load_stub() &&
         *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
         code->ic_state() == MEGAMORPHIC;
@@ -103,23 +106,25 @@
 
 
 bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     return code->is_keyed_store_stub() &&
         code->ic_state() == MONOMORPHIC &&
-        Code::ExtractTypeFromFlags(code->flags()) == NORMAL;
+        Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
+        code->FindFirstMap() != NULL &&
+        !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Builtins* builtins = Isolate::Current()->builtins();
+    Builtins* builtins = isolate_->builtins();
     return code->is_keyed_store_stub() &&
         *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
         *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
@@ -131,18 +136,20 @@
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
   Handle<Object> value = GetInfo(expr->id());
-  return value->IsMap() || value->IsSmi();
+  return value->IsMap() || value->IsSmi() || value->IsJSFunction();
 }
 
 
 Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
   ASSERT(LoadIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     Map* first_map = code->FindFirstMap();
     ASSERT(first_map != NULL);
-    return Handle<Map>(first_map);
+    return CanRetainOtherContext(first_map, *global_context_)
+        ? Handle<Map>::null()
+        : Handle<Map>(first_map);
   }
   return Handle<Map>::cast(map_or_code);
 }
@@ -150,10 +157,14 @@
 
 Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    return Handle<Map>(code->FindFirstMap());
+    Map* first_map = code->FindFirstMap();
+    ASSERT(first_map != NULL);
+    return CanRetainOtherContext(first_map, *global_context_)
+        ? Handle<Map>::null()
+        : Handle<Map>(first_map);
   }
   return Handle<Map>::cast(map_or_code);
 }
@@ -203,6 +214,7 @@
   return check;
 }
 
+
 Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
     CheckType check) {
   JSFunction* function = NULL;
@@ -225,9 +237,14 @@
 }
 
 
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
+  return Handle<JSFunction>::cast(GetInfo(expr->id()));
+}
+
+
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
   return *GetInfo(expr->id()) ==
-      Isolate::Current()->builtins()->builtin(id);
+      isolate_->builtins()->builtin(id);
 }
 
 
@@ -352,6 +369,10 @@
       return unknown;
     case CompareIC::SMIS:
       return TypeInfo::Smi();
+    case CompareIC::STRINGS:
+      return TypeInfo::String();
+    case CompareIC::SYMBOLS:
+      return TypeInfo::Symbol();
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
     case CompareIC::OBJECTS:
@@ -397,24 +418,70 @@
                                               Handle<String> name,
                                               Code::Flags flags,
                                               SmallMapList* types) {
-  Isolate* isolate = Isolate::Current();
   Handle<Object> object = GetInfo(ast_id);
   if (object->IsUndefined() || object->IsSmi()) return;
 
-  if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+  if (*object ==
+      isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
     // TODO(fschneider): We could collect the maps and signal that
     // we need a generic store (or load) here.
     ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
   } else if (object->IsMap()) {
     types->Add(Handle<Map>::cast(object));
-  } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+  } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
+      Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
     types->Reserve(4);
     ASSERT(object->IsCode());
-    isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
+    isolate_->stub_cache()->CollectMatchingMaps(types,
+                                                *name,
+                                                flags,
+                                                global_context_);
   }
 }
 
 
+// Check if a map originates from a given global context. We use this
+// information to filter out maps from different contexts to avoid
+// retaining objects from different tabs in Chrome via optimized code.
+bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
+                                               Context* global_context) {
+  Object* constructor = NULL;
+  while (!map->prototype()->IsNull()) {
+    constructor = map->constructor();
+    if (!constructor->IsNull()) {
+      // If the constructor is neither null nor a JSFunction, we have to
+      // conservatively assume that it may retain a global context.
+      if (!constructor->IsJSFunction()) return true;
+      // Check if the constructor directly references a foreign context.
+      if (CanRetainOtherContext(JSFunction::cast(constructor),
+                                global_context)) {
+        return true;
+      }
+    }
+    map = HeapObject::cast(map->prototype())->map();
+  }
+  constructor = map->constructor();
+  if (constructor->IsNull()) return false;
+  JSFunction* function = JSFunction::cast(constructor);
+  return CanRetainOtherContext(function, global_context);
+}
+
+
+bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
+                                               Context* global_context) {
+  return function->context()->global() != global_context->global()
+      && function->context()->global() != global_context->builtins();
+}
+
+
+static void AddMapIfMissing(Handle<Map> map, SmallMapList* list) {
+  for (int i = 0; i < list->length(); ++i) {
+    if (list->at(i).is_identical_to(map)) return;
+  }
+  list->Add(map);
+}
+
+
 void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
                                                    SmallMapList* types) {
   Handle<Object> object = GetInfo(ast_id);
@@ -428,7 +495,10 @@
       RelocInfo* info = it.rinfo();
       Object* object = info->target_object();
       if (object->IsMap()) {
-        types->Add(Handle<Map>(Map::cast(object)));
+        Map* map = Map::cast(object);
+        if (!CanRetainOtherContext(map, *global_context_)) {
+          AddMapIfMissing(Handle<Map>(map), types);
+        }
       }
     }
   }
@@ -470,7 +540,7 @@
                                           ZoneList<RelocInfo>* infos) {
   DisableAssertNoAllocation allocation_allowed;
   byte* old_start = code->instruction_start();
-  dictionary_ = FACTORY->NewUnseededNumberDictionary(infos->length());
+  dictionary_ = FACTORY->NewNumberDictionary(infos->length());
   byte* new_start = code->instruction_start();
   RelocateRelocInfos(infos, old_start, new_start);
 }
@@ -488,55 +558,69 @@
 
 void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
   for (int i = 0; i < infos->length(); i++) {
+    RelocInfo reloc_entry = (*infos)[i];
+    Address target_address = reloc_entry.target_address();
     unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
-    Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
-    ProcessTarget(ast_id, target);
-  }
-}
-
-
-void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
-  switch (target->kind()) {
-    case Code::LOAD_IC:
-    case Code::STORE_IC:
-    case Code::CALL_IC:
-    case Code::KEYED_CALL_IC:
-      if (target->ic_state() == MONOMORPHIC) {
-        if (target->kind() == Code::CALL_IC &&
-            target->check_type() != RECEIVER_MAP_CHECK) {
-          SetInfo(ast_id,  Smi::FromInt(target->check_type()));
-        } else {
-          Object* map = target->FindFirstMap();
-          SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
+    Code* target = Code::GetCodeFromTargetAddress(target_address);
+    switch (target->kind()) {
+      case Code::LOAD_IC:
+      case Code::STORE_IC:
+      case Code::CALL_IC:
+      case Code::KEYED_CALL_IC:
+        if (target->ic_state() == MONOMORPHIC) {
+          if (target->kind() == Code::CALL_IC &&
+              target->check_type() != RECEIVER_MAP_CHECK) {
+            SetInfo(ast_id, Smi::FromInt(target->check_type()));
+          } else {
+            Object* map = target->FindFirstMap();
+            if (map == NULL) {
+              SetInfo(ast_id, static_cast<Object*>(target));
+            } else if (!CanRetainOtherContext(Map::cast(map),
+                                              *global_context_)) {
+              SetInfo(ast_id, map);
+            }
+          }
+        } else if (target->ic_state() == MEGAMORPHIC) {
+          SetInfo(ast_id, target);
         }
-      } else if (target->ic_state() == MEGAMORPHIC) {
+        break;
+
+      case Code::KEYED_LOAD_IC:
+      case Code::KEYED_STORE_IC:
+        if (target->ic_state() == MONOMORPHIC ||
+            target->ic_state() == MEGAMORPHIC) {
+          SetInfo(ast_id, target);
+        }
+        break;
+
+      case Code::UNARY_OP_IC:
+      case Code::BINARY_OP_IC:
+      case Code::COMPARE_IC:
+      case Code::TO_BOOLEAN_IC:
         SetInfo(ast_id, target);
-      }
-      break;
+        break;
 
-    case Code::KEYED_LOAD_IC:
-    case Code::KEYED_STORE_IC:
-      if (target->ic_state() == MONOMORPHIC ||
-          target->ic_state() == MEGAMORPHIC) {
-        SetInfo(ast_id, target);
-      }
-      break;
+      case Code::STUB:
+        if (target->major_key() == CodeStub::CallFunction &&
+            target->has_function_cache()) {
+          Object* value = CallFunctionStub::GetCachedValue(reloc_entry.pc());
+          if (value->IsJSFunction() &&
+              !CanRetainOtherContext(JSFunction::cast(value),
+                                     *global_context_)) {
+            SetInfo(ast_id, value);
+          }
+        }
+        break;
 
-    case Code::UNARY_OP_IC:
-    case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:
-    case Code::TO_BOOLEAN_IC:
-      SetInfo(ast_id, target);
-      break;
-
-    default:
-      break;
+      default:
+        break;
+    }
   }
 }
 
 
 void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
-  ASSERT(dictionary_->FindEntry(ast_id) == UnseededNumberDictionary::kNotFound);
+  ASSERT(dictionary_->FindEntry(ast_id) == NumberDictionary::kNotFound);
   MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
   USE(maybe_result);
 #ifdef DEBUG
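
Note: the heart of this type-info.cc change is CanRetainOtherContext, which
keeps optimized code in one tab from retaining maps (and thus objects)
belonging to another tab's context. A standalone miniature of the conservative
prototype-chain walk; plain structs stand in for Map/JSFunction/Context, and
the builtins-object escape hatch from the real check is omitted:

#include <cstdio>

struct Context { int global; };

struct Constructor {
  bool is_function;   // JSFunction vs. some other heap object
  Context* context;   // only meaningful when is_function
};

struct Map {
  Constructor* constructor;  // may be null
  Map* prototype_map;        // null terminates the chain
};

static bool FunctionRetainsOtherContext(const Constructor* fn,
                                        const Context* global_context) {
  return fn->context->global != global_context->global;
}

// Mirrors the shape of TypeFeedbackOracle::CanRetainOtherContext: walk the
// prototype chain; anything not provably tied to our context is
// conservatively treated as foreign.
static bool CanRetainOtherContext(const Map* map,
                                  const Context* global_context) {
  for (; map != nullptr; map = map->prototype_map) {
    const Constructor* ctor = map->constructor;
    if (ctor == nullptr) continue;         // nothing recorded: keep walking
    if (!ctor->is_function) return true;   // conservative: could be anything
    if (FunctionRetainsOtherContext(ctor, global_context)) return true;
  }
  return false;
}

int main() {
  Context ours = { 1 };
  Context theirs = { 2 };
  Constructor local_fn = { true, &ours };
  Constructor foreign_fn = { true, &theirs };
  Map base = { &local_fn, nullptr };
  Map ok = { &local_fn, &base };
  Map bad = { &foreign_fn, &base };
  std::printf("local chain foreign? %d\n",
              CanRetainOtherContext(&ok, &ours));   // prints 0
  std::printf("foreign ctor foreign? %d\n",
              CanRetainOtherContext(&bad, &ours));  // prints 1
  return 0;
}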
diff --git a/src/type-info.h b/src/type-info.h
index a031740..167494c 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -64,6 +64,8 @@
   static TypeInfo Integer32() { return TypeInfo(kInteger32); }
   // We know it's a Smi.
   static TypeInfo Smi() { return TypeInfo(kSmi); }
+  // We know it's a Symbol.
+  static TypeInfo Symbol() { return TypeInfo(kSymbol); }
   // We know it's a heap number.
   static TypeInfo Double() { return TypeInfo(kDouble); }
   // We know it's a string.
@@ -137,6 +139,16 @@
     return ((type_ & kSmi) == kSmi);
   }
 
+  inline bool IsSymbol() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSymbol) == kSymbol);
+  }
+
+  inline bool IsNonSymbol() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSymbol) == kString);
+  }
+
   inline bool IsInteger32() {
     ASSERT(type_ != kUninitialized);
     return ((type_ & kInteger32) == kInteger32);
@@ -168,6 +180,7 @@
       case kNumber: return "Number";
       case kInteger32: return "Integer32";
       case kSmi: return "Smi";
+      case kSymbol: return "Symbol";
       case kDouble: return "Double";
       case kString: return "String";
       case kNonPrimitive: return "Object";
@@ -186,6 +199,7 @@
     kSmi = 0x17,           // 0010111
     kDouble = 0x19,        // 0011001
     kString = 0x30,        // 0110000
+    kSymbol = 0x32,        // 0110010
     kNonPrimitive = 0x40,  // 1000000
     kUninitialized = 0x7f  // 1111111
   };
@@ -216,7 +230,9 @@
 
 class TypeFeedbackOracle BASE_EMBEDDED {
  public:
-  TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
+  TypeFeedbackOracle(Handle<Code> code,
+                     Handle<Context> global_context,
+                     Isolate* isolate);
 
   bool LoadIsMonomorphicNormal(Property* expr);
   bool LoadIsMegamorphicWithTypeInfo(Property* expr);
@@ -240,9 +256,15 @@
   void CollectKeyedReceiverTypes(unsigned ast_id,
                                  SmallMapList* types);
 
+  static bool CanRetainOtherContext(Map* map, Context* global_context);
+  static bool CanRetainOtherContext(JSFunction* function,
+                                    Context* global_context);
+
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
 
+  Handle<JSFunction> GetCallTarget(Call* expr);
+
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // TODO(1571) We can't use ToBooleanStub::Types as the return value because
@@ -273,14 +295,14 @@
                           byte* old_start,
                           byte* new_start);
   void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
-  void ProcessTarget(unsigned ast_id, Code* target);
 
   // Returns an element from the backing store. Returns undefined if
   // there is no information.
   Handle<Object> GetInfo(unsigned ast_id);
 
   Handle<Context> global_context_;
-  Handle<UnseededNumberDictionary> dictionary_;
+  Isolate* isolate_;
+  Handle<NumberDictionary> dictionary_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
 };
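
Note: the TypeInfo lattice encodes subtyping directly in the bit patterns,
which is why the new kSymbol (0110010) is kString (0110000) plus one bit:
every Symbol is a String, and IsNonSymbol's (type_ & kSymbol) == kString reads
as "all string bits set, symbol bit clear". A standalone check of that
arithmetic, using the constants shown in the hunk above:

#include <cstdio>

enum Type {
  kString = 0x30,  // 0110000
  kSymbol = 0x32   // 0110010: kString plus the symbol bit
};

// T is a subtype of U iff (T & U) == U.
static bool IsString(int t) { return (t & kString) == kString; }
static bool IsSymbol(int t) { return (t & kSymbol) == kSymbol; }
// Known to be a string, known not to be a symbol.
static bool IsNonSymbol(int t) { return (t & kSymbol) == kString; }

int main() {
  std::printf("symbol is string:  %d\n", IsString(kSymbol));     // 1
  std::printf("string is symbol:  %d\n", IsSymbol(kString));     // 0
  std::printf("string non-symbol: %d\n", IsNonSymbol(kString));  // 1
  std::printf("symbol non-symbol: %d\n", IsNonSymbol(kSymbol));  // 0
  return 0;
}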
diff --git a/src/unicode.h b/src/unicode.h
index 39fc349..fb9e633 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,7 @@
  * The max length of the result of converting the case of a single
  * character.
  */
-static const int kMaxMappingSize = 4;
+const int kMaxMappingSize = 4;
 
 template <class T, int size = 256>
 class Predicate {
diff --git a/src/uri.js b/src/uri.js
index c910d75..e76104a 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -111,47 +111,59 @@
     var o1 = octets[1];
     if (o0 < 0xe0) {
       var a = o0 & 0x1f;
-      if ((o1 < 0x80) || (o1 > 0xbf))
+      if ((o1 < 0x80) || (o1 > 0xbf)) {
         throw new $URIError("URI malformed");
+      }
       var b = o1 & 0x3f;
       value = (a << 6) + b;
-      if (value < 0x80 || value > 0x7ff)
+      if (value < 0x80 || value > 0x7ff) {
         throw new $URIError("URI malformed");
+      }
     } else {
       var o2 = octets[2];
       if (o0 < 0xf0) {
         var a = o0 & 0x0f;
-        if ((o1 < 0x80) || (o1 > 0xbf))
+        if ((o1 < 0x80) || (o1 > 0xbf)) {
           throw new $URIError("URI malformed");
+        }
         var b = o1 & 0x3f;
-        if ((o2 < 0x80) || (o2 > 0xbf))
+        if ((o2 < 0x80) || (o2 > 0xbf)) {
           throw new $URIError("URI malformed");
+        }
         var c = o2 & 0x3f;
         value = (a << 12) + (b << 6) + c;
-        if ((value < 0x800) || (value > 0xffff))
+        if ((value < 0x800) || (value > 0xffff)) {
           throw new $URIError("URI malformed");
+        }
       } else {
         var o3 = octets[3];
         if (o0 < 0xf8) {
           var a = (o0 & 0x07);
-          if ((o1 < 0x80) || (o1 > 0xbf))
+          if ((o1 < 0x80) || (o1 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var b = (o1 & 0x3f);
-          if ((o2 < 0x80) || (o2 > 0xbf))
+          if ((o2 < 0x80) || (o2 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var c = (o2 & 0x3f);
-          if ((o3 < 0x80) || (o3 > 0xbf))
+          if ((o3 < 0x80) || (o3 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var d = (o3 & 0x3f);
           value = (a << 18) + (b << 12) + (c << 6) + d;
-          if ((value < 0x10000) || (value > 0x10ffff))
+          if ((value < 0x10000) || (value > 0x10ffff)) {
             throw new $URIError("URI malformed");
+          }
         } else {
           throw new $URIError("URI malformed");
         }
       }
     }
   }
+  if (0xD800 <= value && value <= 0xDFFF) {
+    throw new $URIError("URI malformed");
+  }
   if (value < 0x10000) {
     result[index++] = value;
     return index;
@@ -207,14 +219,15 @@
       var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
       if (cc >> 7) {
         var n = 0;
-        while (((cc << ++n) & 0x80) != 0) ;
+        while (((cc << ++n) & 0x80) != 0) { }
         if (n == 1 || n > 4) throw new $URIError("URI malformed");
         var octets = new $Array(n);
         octets[0] = cc;
         if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
         for (var i = 1; i < n; i++) {
           if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
-          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
+          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
+                                            uri.charCodeAt(++k));
         }
         index = URIDecodeOctets(octets, result, index);
       } else {
@@ -254,7 +267,7 @@
     if (63 <= cc && cc <= 64) return true;
 
     return false;
-  };
+  }
   var string = ToString(uri);
   return Decode(string, reservedPredicate);
 }
@@ -262,7 +275,7 @@
 
 // ECMA-262 - 15.1.3.2.
 function URIDecodeComponent(component) {
-  function reservedPredicate(cc) { return false; };
+  function reservedPredicate(cc) { return false; }
   var string = ToString(component);
   return Decode(string, reservedPredicate);
 }
@@ -303,7 +316,7 @@
     if (cc == 126) return true;
 
     return false;
-  };
+  }
 
   var string = ToString(uri);
   return Encode(string, unescapePredicate);
@@ -326,7 +339,7 @@
     if (cc == 126) return true;
 
     return false;
-  };
+  }
 
   var string = ToString(component);
   return Encode(string, unescapePredicate);
@@ -366,7 +379,9 @@
 function IsValidHex(s) {
   for (var i = 0; i < s.length; ++i) {
     var cc = s.charCodeAt(i);
-    if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
+    if ((48 <= cc && cc <= 57) ||
+        (65 <= cc && cc <= 70) ||
+        (97 <= cc && cc <= 102)) {
       // '0'..'9', 'A'..'F' and 'a' .. 'f'.
     } else {
       return false;
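
Note: besides the brace-style cleanup, the uri.js hunk adds one real check to
URIDecodeOctets: decoded code points in the UTF-16 surrogate range
0xD800-0xDFFF are now rejected as malformed, closing a hole the per-length
overlong checks did not cover. A standalone C++ rendering of the same
validation; the helper name and error type here are ours, not V8's:

#include <cstdint>
#include <cstdio>
#include <stdexcept>

// Decodes one UTF-8 sequence of 1-4 octets with the same checks as the
// patched URIDecodeOctets: continuation bytes must be 10xxxxxx, overlong
// encodings are rejected via per-length range checks, and (new in this
// patch) surrogate code points are rejected as malformed.
static uint32_t DecodeOctets(const uint8_t* o, int n) {
  static const uint8_t kLeadMask[] = { 0x7f, 0x1f, 0x0f, 0x07 };
  static const uint32_t kMin[] = { 0x00, 0x80, 0x800, 0x10000 };
  static const uint32_t kMax[] = { 0x7f, 0x7ff, 0xffff, 0x10ffff };
  uint32_t value = o[0] & kLeadMask[n - 1];
  for (int i = 1; i < n; i++) {
    if (o[i] < 0x80 || o[i] > 0xbf) throw std::runtime_error("URI malformed");
    value = (value << 6) | (o[i] & 0x3f);
  }
  if (value < kMin[n - 1] || value > kMax[n - 1])
    throw std::runtime_error("URI malformed");
  if (0xD800 <= value && value <= 0xDFFF)  // the newly added surrogate check
    throw std::runtime_error("URI malformed");
  return value;
}

int main() {
  const uint8_t euro[] = { 0xe2, 0x82, 0xac };  // U+20AC
  std::printf("euro: U+%04X\n", (unsigned)DecodeOctets(euro, 3));
  const uint8_t surrogate[] = { 0xed, 0xa0, 0x80 };  // U+D800, now rejected
  try {
    DecodeOctets(surrogate, 3);
  } catch (const std::runtime_error& e) {
    std::printf("surrogate: %s\n", e.what());
  }
  return 0;
}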
diff --git a/src/utils.h b/src/utils.h
index cf7819e..68b1517 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -47,13 +47,13 @@
 // Returns true iff x is a power of 2 (or zero). Cannot be used with the
 // maximally negative value of the type T (the -1 overflows).
 template <typename T>
-static inline bool IsPowerOf2(T x) {
+inline bool IsPowerOf2(T x) {
   return IS_POWER_OF_TWO(x);
 }
 
 
 // X must be a power of 2.  Returns the number of trailing zeros.
-static inline int WhichPowerOf2(uint32_t x) {
+inline int WhichPowerOf2(uint32_t x) {
   ASSERT(IsPowerOf2(x));
   ASSERT(x != 0);
   int bits = 0;
@@ -88,7 +88,7 @@
 // The C++ standard leaves the semantics of '>>' undefined for
 // negative signed operands. Most implementations do the right thing,
 // though.
-static inline int ArithmeticShiftRight(int x, int s) {
+inline int ArithmeticShiftRight(int x, int s) {
   return x >> s;
 }
 
@@ -97,7 +97,7 @@
 // This allows conversion of Addresses and integral types into
 // 0-relative int offsets.
 template <typename T>
-static inline intptr_t OffsetFrom(T x) {
+inline intptr_t OffsetFrom(T x) {
   return x - static_cast<T>(0);
 }
 
@@ -106,14 +106,14 @@
 // This allows conversion of 0-relative int offsets into Addresses and
 // integral types.
 template <typename T>
-static inline T AddressFrom(intptr_t x) {
+inline T AddressFrom(intptr_t x) {
   return static_cast<T>(static_cast<T>(0) + x);
 }
 
 
 // Return the largest multiple of m which is <= x.
 template <typename T>
-static inline T RoundDown(T x, int m) {
+inline T RoundDown(T x, intptr_t m) {
   ASSERT(IsPowerOf2(m));
   return AddressFrom<T>(OffsetFrom(x) & -m);
 }
@@ -121,13 +121,13 @@
 
 // Return the smallest multiple of m which is >= x.
 template <typename T>
-static inline T RoundUp(T x, int m) {
-  return RoundDown(x + m - 1, m);
+inline T RoundUp(T x, intptr_t m) {
+  return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
 
 
 template <typename T>
-static int Compare(const T& a, const T& b) {
+int Compare(const T& a, const T& b) {
   if (a == b)
     return 0;
   else if (a < b)
@@ -138,16 +138,26 @@
 
 
 template <typename T>
-static int PointerValueCompare(const T* a, const T* b) {
+int PointerValueCompare(const T* a, const T* b) {
   return Compare<T>(*a, *b);
 }
 
 
+// Compare function for the object pointer values of two
+// handlified objects.  The handles are passed as pointers to the
+// handles.
+template<typename T> class Handle;  // Forward declaration.
+template <typename T>
+int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
+  return Compare<T*>(*(*a), *(*b));
+}
+
+
 // Returns the smallest power of two which is >= x. If you pass in a
 // number that is already a power of two, it is returned as is.
 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
 // figure 3-3, page 48, where the function is called clp2.
-static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -159,18 +169,23 @@
 }
 
 
+inline uint32_t RoundDownToPowerOf2(uint32_t x) {
+  uint32_t rounded_up = RoundUpToPowerOf2(x);
+  if (rounded_up > x) return rounded_up >> 1;
+  return rounded_up;
+}
 
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
-  ASSERT(IsPowerOf2(alignment));
+
+template <typename T, typename U>
+inline bool IsAligned(T value, U alignment) {
   return (value & (alignment - 1)) == 0;
 }
 
 
 // Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr,
-                                    intptr_t alignment,
-                                    int offset) {
+inline bool IsAddressAligned(Address addr,
+                             intptr_t alignment,
+                             int offset = 0) {
   intptr_t offs = OffsetFrom(addr + offset);
   return IsAligned(offs, alignment);
 }
@@ -178,14 +193,14 @@
 
 // Returns the maximum of the two parameters.
 template <typename T>
-static T Max(T a, T b) {
+T Max(T a, T b) {
   return a < b ? b : a;
 }
 
 
 // Returns the minimum of the two parameters.
 template <typename T>
-static T Min(T a, T b) {
+T Min(T a, T b) {
   return a < b ? a : b;
 }
 
@@ -237,13 +252,10 @@
 // ----------------------------------------------------------------------------
 // Hash function.
 
-static const uint32_t kZeroHashSeed = 0;
-
 // Thomas Wang, Integer Hash Functions.
 // http://www.concentric.net/~Ttwang/tech/inthash.htm
-static inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
+inline uint32_t ComputeIntegerHash(uint32_t key) {
   uint32_t hash = key;
-  hash = hash ^ seed;
   hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
   hash = hash ^ (hash >> 12);
   hash = hash + (hash << 2);
@@ -254,10 +266,21 @@
 }
 
 
-static inline uint32_t ComputePointerHash(void* ptr) {
+inline uint32_t ComputeLongHash(uint64_t key) {
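+  // Same Wang-style shift-xor mixing as ComputeIntegerHash above, extended
+  // to fold a 64-bit key down to a 32-bit hash.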
+  uint64_t hash = key;
+  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
+  hash = hash ^ (hash >> 31);
+  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
+  hash = hash ^ (hash >> 11);
+  hash = hash + (hash << 6);
+  hash = hash ^ (hash >> 22);
+  return (uint32_t) hash;
+}
+
+
+inline uint32_t ComputePointerHash(void* ptr) {
   return ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
-      v8::internal::kZeroHashSeed);
+      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
 }
 
 
@@ -711,7 +734,7 @@
 
 // Compare ASCII/16bit chars to ASCII/16bit chars.
 template <typename lchar, typename rchar>
-static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
   const lchar* limit = lhs + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*lhs) == sizeof(*rhs)) {
@@ -738,7 +761,7 @@
 
 
 // Calculate 10^exponent.
-static inline int TenToThe(int exponent) {
+inline int TenToThe(int exponent) {
   ASSERT(exponent <= 9);
   ASSERT(exponent >= 1);
   int answer = 10;
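
For reference, a minimal standalone sketch (not part of the patch; names are
illustrative) of the rounding and alignment helpers touched above. The real
templates additionally route through OffsetFrom/AddressFrom so that they also
work on pointer types:

#include <cassert>
#include <cstdint>

// Mirrors RoundDown/RoundUp above for integral types; m must be a power of 2.
inline intptr_t RoundDownExample(intptr_t x, intptr_t m) {
  return x & -m;  // clear the low log2(m) bits
}
inline intptr_t RoundUpExample(intptr_t x, intptr_t m) {
  return RoundDownExample(x + m - 1, m);
}
// Mirrors the relaxed two-parameter IsAligned introduced above.
template <typename T, typename U>
inline bool IsAlignedExample(T value, U alignment) {
  return (value & (alignment - 1)) == 0;
}

int main() {
  assert(RoundDownExample(37, 16) == 32);
  assert(RoundUpExample(37, 16) == 48);
  assert(IsAlignedExample(48, 16) && !IsAlignedExample(37, 16));
  return 0;
}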
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 2de8303..47341e7 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -107,7 +107,10 @@
   SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)      \
   /* Number of code objects found from pc. */                         \
   SC(pc_to_code, V8.PcToCode)                                         \
-  SC(pc_to_code_cached, V8.PcToCodeCached)
+  SC(pc_to_code_cached, V8.PcToCodeCached)                            \
+  /* The store-buffer implementation of the write barrier. */         \
+  SC(store_buffer_compactions, V8.StoreBufferCompactions)             \
+  SC(store_buffer_overflows, V8.StoreBufferOverflows)
 
 
 #define STATS_COUNTER_LIST_2(SC)                                      \
@@ -126,10 +129,6 @@
      V8.GCCompactorCausedByWeakHandles)                               \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
-  SC(map_to_fast_elements, V8.MapToFastElements)                      \
-  SC(map_to_fast_double_elements, V8.MapToFastDoubleElements)         \
-  SC(map_to_slow_elements, V8.MapToSlowElements)                      \
-  SC(map_to_external_array_elements, V8.MapToExternalArrayElements)   \
   /* How is the generic keyed-load stub used? */                      \
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
diff --git a/src/v8.cc b/src/v8.cc
index 1e9b5dc..66c65e7 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -38,6 +38,7 @@
 #include "log.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
+#include "store-buffer.h"
 
 namespace v8 {
 namespace internal {
@@ -56,6 +57,15 @@
 
 
 bool V8::Initialize(Deserializer* des) {
+  // Setting --harmony implies all other harmony flags.
+  // TODO(rossberg): Is there a better place to put this?
+  if (FLAG_harmony) {
+    FLAG_harmony_typeof = true;
+    FLAG_harmony_scoping = true;
+    FLAG_harmony_proxies = true;
+    FLAG_harmony_collections = true;
+  }
+
   InitializeOncePerProcess();
 
   // The current thread may not yet have entered an isolate to run.
@@ -140,9 +150,10 @@
 
 
 // Used by JavaScript APIs
-uint32_t V8::Random(Isolate* isolate) {
-  ASSERT(isolate == Isolate::Current());
-  return random_base(isolate->random_seed());
+uint32_t V8::Random(Context* context) {
+  ASSERT(context->IsGlobalContext());
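+  // The seed now lives in a ByteArray on the global context, so every
+  // context gets its own Math.random sequence (previously the state was
+  // kept per-isolate).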
+  ByteArray* seed = context->random_seed();
+  return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
 }
 
 
@@ -172,8 +183,9 @@
 } double_int_union;
 
 
-Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
-  uint64_t random_bits = Random(isolate);
+Object* V8::FillHeapNumberWithRandom(Object* heap_number,
+                                     Context* context) {
+  uint64_t random_bits = Random(context);
   // Make a double* from address (heap_number + sizeof(double)).
   double_int_union* r = reinterpret_cast<double_int_union*>(
       reinterpret_cast<char*>(heap_number) +
@@ -215,6 +227,12 @@
   FLAG_peephole_optimization = !use_crankshaft_;
 
   ElementsAccessor::InitializeOncePerProcess();
+
+  if (FLAG_stress_compaction) {
+    FLAG_force_marking_deque_overflows = true;
+    FLAG_gc_global = true;
+    FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
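+    // With kPageSizeBits == 20 this evaluates to (1 << 10) * 2 == 2048;
+    // assuming the flag is in KB, that caps the new space at two 1 MB pages.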
+  }
 }
 
 } }  // namespace v8::internal
diff --git a/src/v8.h b/src/v8.h
index e565ca5..01feefc 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -60,10 +60,11 @@
 #include "objects-inl.h"
 #include "spaces-inl.h"
 #include "heap-inl.h"
+#include "incremental-marking-inl.h"
+#include "mark-compact-inl.h"
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
-#include "isolate-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -95,14 +96,14 @@
   // generation.
   static void SetEntropySource(EntropySource source);
   // Random number generation support. Not cryptographically safe.
-  static uint32_t Random(Isolate* isolate);
+  static uint32_t Random(Context* context);
   // We use random numbers internally in memory allocation and in the
   // compilers for security. In order to prevent information leaks we
   // use a separate random state for internal random number
   // generation.
   static uint32_t RandomPrivate(Isolate* isolate);
   static Object* FillHeapNumberWithRandom(Object* heap_number,
-                                          Isolate* isolate);
+                                          Context* context);
 
   // Idle notification directly from the API.
   static bool IdleNotification();
@@ -124,6 +125,15 @@
   static bool use_crankshaft_;
 };
 
+
+// JavaScript defines two kinds of 'nil'.
+enum NilValue { kNullValue, kUndefinedValue };
+
+
+// JavaScript defines two kinds of equality.
+enum EqualityKind { kStrictEquality, kNonStrictEquality };
+
+
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
diff --git a/src/v8conversions.h b/src/v8conversions.h
index 1840e3a..0147d8c 100644
--- a/src/v8conversions.h
+++ b/src/v8conversions.h
@@ -34,13 +34,13 @@
 namespace internal {
 
 // Convert from Number object to C integer.
-static inline int32_t NumberToInt32(Object* number) {
+inline int32_t NumberToInt32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToInt32(number->Number());
 }
 
 
-static inline uint32_t NumberToUint32(Object* number) {
+inline uint32_t NumberToUint32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToUint32(number->Number());
 }
diff --git a/src/v8globals.h b/src/v8globals.h
index bf843e5..005cdbd 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -29,6 +29,7 @@
 #define V8_V8GLOBALS_H_
 
 #include "globals.h"
+#include "checks.h"
 
 namespace v8 {
 namespace internal {
@@ -79,18 +80,20 @@
     reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
 const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
 const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
+const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
 #else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
 const uint32_t kSlotsZapValue = 0xbeefdeef;
 const uint32_t kDebugZapValue = 0xbadbaddb;
+const uint32_t kFreeListZapValue = 0xfeed1eaf;
 #endif
 
 
-// Number of bits to represent the page size for paged spaces. The value of 13
-// gives 8K bytes per page.
-const int kPageSizeBits = 13;
+// Number of bits to represent the page size for paged spaces. The value of 20
+// gives 1 MB per page.
+const int kPageSizeBits = 20;
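+// (1 << 20 == 1048576, so each page is 1 MB and the low 20 bits of an
+// address select the offset within its page.)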
 
 // On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far this constant is
@@ -98,10 +101,6 @@
 const int kProcessorCacheLineSize = 64;
 
 // Constants relevant to double precision floating point numbers.
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
 
@@ -128,11 +127,10 @@
 class Expression;
 class ExternalReference;
 class FixedArray;
-class FunctionEntry;
 class FunctionLiteral;
 class FunctionTemplateInfo;
-class SeededNumberDictionary;
-class UnseededNumberDictionary;
+class MemoryChunk;
+class NumberDictionary;
 class StringDictionary;
 template <typename T> class Handle;
 class Heap;
@@ -162,8 +160,7 @@
 class RegExpCompiler;
 class RegExpVisitor;
 class Scope;
-template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
-class SerializedScopeInfo;
+class ScopeInfo;
 class Script;
 class Slot;
 class Smi;
@@ -255,12 +252,6 @@
 };
 
 
-// Callback function on object slots, used for iterating heap object slots in
-// HeapObjects, global pointers to heap objects, etc. The callback allows the
-// callback function to change the value of the slot.
-typedef void (*ObjectSlotCallback)(HeapObject** pointer);
-
-
 // Callback function used for iterating objects in heap spaces,
 // for example, scanning heap objects.
 typedef int (*HeapObjectCallback)(HeapObject* obj);
@@ -307,7 +298,9 @@
   NO_CALL_FUNCTION_FLAGS = 0,
   // Receiver might implicitly be the global object. If it is, the
   // hole is passed to the call function stub.
-  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
+  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
+  // The call target is cached in the instruction stream.
+  RECORD_CALL_TARGET = 1 << 1
 };
 
 
@@ -317,28 +310,17 @@
 };
 
 
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
-  NORMAL                    = 0,  // only in slow mode
-  FIELD                     = 1,  // only in fast mode
-  CONSTANT_FUNCTION         = 2,  // only in fast mode
-  CALLBACKS                 = 3,
-  HANDLER                   = 4,  // only in lookup results, not in descriptors
-  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
-  MAP_TRANSITION            = 6,  // only in fast mode
-  ELEMENTS_TRANSITION       = 7,
-  CONSTANT_TRANSITION       = 8,  // only in fast mode
-  NULL_DESCRIPTOR           = 9,  // only in fast mode
-  // All properties before MAP_TRANSITION are real.
-  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
-  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
-  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
-  // nonexistent properties.
-  NONEXISTENT = NULL_DESCRIPTOR
-};
+// The Store Buffer (GC).
+typedef enum {
+  kStoreBufferFullEvent,
+  kStoreBufferStartScanningPagesEvent,
+  kStoreBufferScanningPageEvent
+} StoreBufferEvent;
+
+
+typedef void (*StoreBufferCallback)(Heap* heap,
+                                    MemoryChunk* page,
+                                    StoreBufferEvent event);
 
 
 // Whether to remove map transitions and constant transitions from a
@@ -475,21 +457,11 @@
                   SAHF = 0,    // x86
                   FPU = 1};    // MIPS
 
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum StrictModeFlag {
-  kNonStrictMode,
-  kStrictMode,
-  // This value is never used, but is needed to prevent GCC 4.5 from failing
-  // to compile when we assert that a flag is either kNonStrictMode or
-  // kStrictMode.
-  kInvalidStrictFlag
-};
-
 
 // Used to specify if a macro instruction must perform a smi check on tagged
 // values.
 enum SmiCheckType {
-  DONT_DO_SMI_CHECK = 0,
+  DONT_DO_SMI_CHECK,
   DO_SMI_CHECK
 };
 
@@ -497,20 +469,105 @@
 // Used to specify whether a receiver is implicitly or explicitly
 // provided to a call.
 enum CallKind {
-  CALL_AS_METHOD = 0,
+  CALL_AS_METHOD,
   CALL_AS_FUNCTION
 };
 
 
-static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+enum ScopeType {
+  EVAL_SCOPE,      // The top-level scope for an eval source.
+  FUNCTION_SCOPE,  // The top-level scope for a function.
+  GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
+  CATCH_SCOPE,     // The scope introduced by catch.
+  BLOCK_SCOPE,     // The scope introduced by a new block.
+  WITH_SCOPE       // The scope introduced by with.
+};
+
+
+const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
 
 const uint64_t kHoleNanInt64 =
     (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
 const uint64_t kLastNonNaNInt64 =
     (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
 
+
+enum VariableMode {
+  // User declared variables:
+  VAR,             // declared via 'var', and 'function' declarations
+
+  CONST,           // declared via 'const' declarations
+
+  CONST_HARMONY,   // declared via 'const' declarations in harmony mode
+
+  LET,             // declared via 'let' declarations
+
+  // Variables introduced by the compiler:
+  DYNAMIC,         // always require dynamic lookup (we don't know
+                   // the declaration)
+
+  DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
+                   // variable is global unless it has been shadowed
+                   // by an eval-introduced variable
+
+  DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
+                   // variable is local and where it is unless it
+                   // has been shadowed by an eval-introduced
+                   // variable
+
+  INTERNAL,        // like VAR, but not user-visible (may or may not
+                   // be in a context)
+
+  TEMPORARY        // temporary variables (not user-visible), never
+                   // in a context
+};
+
+
+// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
+// and immutable bindings that can be in two states: initialized and
+// uninitialized. In ES5 only immutable bindings have these two states. When
+// accessing a binding, it needs to be checked for initialization. However in
+// the following cases the binding is initialized immediately after creation
+// so the initialization check can always be skipped:
+// 1. Var declared local variables.
+//      var foo;
+// 2. A local variable introduced by a function declaration.
+//      function foo() {}
+// 3. Parameters
+//      function x(foo) {}
+// 4. Catch bound variables.
+//      try {} catch (foo) {}
+// 5. Function variables of named function expressions.
+//      var x = function foo() {}
+// 6. Implicit binding of 'this'.
+// 7. Implicit binding of 'arguments' in functions.
+//
+// ES5 specifies object environment records, which are introduced by ES
+// elements such as Program and WithStatement that associate identifier
+// bindings with the properties of some object. In the specification only
+// mutable bindings exist (which may be non-writable) and there is no
+// distinct initialization step. However, V8 allows const declarations in
+// global code with distinct creation and initialization steps, which are
+// represented by non-writable properties in the global object. As a result
+// these bindings also need to be checked for initialization.
+//
+// The following enum specifies a flag that indicates if the binding needs a
+// distinct initialization step (kNeedsInitialization) or if the binding is
+// immediately initialized upon creation (kCreatedInitialized).
+enum InitializationFlag {
+  kNeedsInitialization,
+  kCreatedInitialized
+};
+
+
+enum ClearExceptionFlag {
+  KEEP_EXCEPTION,
+  CLEAR_EXCEPTION
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_V8GLOBALS_H_
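
The StoreBufferEvent enum and StoreBufferCallback typedef added earlier in
this header give the new store-buffer write barrier a notification hook. A
hedged sketch of a conforming callback (Heap and MemoryChunk are the
V8-internal types forward-declared above; the body is illustrative only):

void ExampleStoreBufferCallback(Heap* heap,
                                MemoryChunk* page,
                                StoreBufferEvent event) {
  (void) heap;  // unused in this sketch
  (void) page;
  switch (event) {
    case kStoreBufferFullEvent:
      // The store buffer overflowed and is about to be processed.
      break;
    case kStoreBufferStartScanningPagesEvent:
    case kStoreBufferScanningPageEvent:
      // Progress notifications while pages are being rescanned.
      break;
  }
}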
diff --git a/src/v8memory.h b/src/v8memory.h
index 901e78d..f71de82 100644
--- a/src/v8memory.h
+++ b/src/v8memory.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,6 +60,10 @@
     return *reinterpret_cast<int*>(addr);
   }
 
+  static unsigned& unsigned_at(Address addr) {
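+    // Returns a mutable reference, so callers can read or assign through
+    // it, e.g. Memory::unsigned_at(addr) = value;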
+    return *reinterpret_cast<unsigned*>(addr);
+  }
+
   static double& double_at(Address addr)  {
     return *reinterpret_cast<double*>(addr);
   }
diff --git a/src/v8natives.js b/src/v8natives.js
index 588bdb2..11b1a7e 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -60,18 +60,6 @@
   %ToFastProperties(object);
 }
 
-// Emulates JSC by installing functions on a hidden prototype that
-// lies above the current object/prototype.  This lets you override
-// functions on String.prototype etc. and then restore the old function
-// with delete.  See http://code.google.com/p/chromium/issues/detail?id=1717
-function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
-  %CheckIsBootstrapping();
-  var hidden_prototype = new $Object();
-  %SetHiddenPrototype(object, hidden_prototype);
-  InstallFunctions(hidden_prototype, attributes, functions);
-}
-
-
 // Prevents changes to the prototype of a built-in function.
 // The "prototype" property of the function object is made non-configurable,
 // and the prototype object is made non-extensible. The latter prevents
@@ -139,8 +127,9 @@
     // The spec says ToString should be evaluated before ToInt32.
     string = TO_STRING_INLINE(string);
     radix = TO_INT32(radix);
-    if (!(radix == 0 || (2 <= radix && radix <= 36)))
+    if (!(radix == 0 || (2 <= radix && radix <= 36))) {
       return $NaN;
+    }
   }
 
   if (%_HasCachedArrayIndex(string) &&
@@ -162,28 +151,23 @@
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
-  var receiver = this;
   var global_receiver = %GlobalReceiver(global);
-
-  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = global_receiver;
-  }
-
-  var this_is_global_receiver = (receiver === global_receiver);
   var global_is_detached = (global === global_receiver);
 
   // For consistency with JSC we require the global object passed to
   // eval to be the global object from which 'eval' originated. This
   // is not mandated by the spec.
-  if (!this_is_global_receiver || global_is_detached) {
-    throw new $EvalError('The "this" object passed to eval must ' +
+  // We only throw if the global has been detached, since we need the
+  // receiver as this-value for the call.
+  if (global_is_detached) {
+    throw new $EvalError('The "this" value passed to eval must ' +
                          'be the global object from which eval originated');
   }
 
   var f = %CompileString(x);
   if (!IS_FUNCTION(f)) return f;
 
-  return %_CallFunction(receiver, f);
+  return %_CallFunction(global_receiver, f);
 }
 
 
@@ -193,13 +177,14 @@
 function SetUpGlobal() {
   %CheckIsBootstrapping();
   // ECMA 262 - 15.1.1.1.
-  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 - 15.1.1.2.
-  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 - 15.1.1.3.
-  %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "undefined", void 0,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // Set up non-enumerable function on the global object.
   InstallFunctions(global, DONT_ENUM, $Array(
@@ -299,7 +284,8 @@
     receiver = %GlobalReceiver(global);
   }
   if (!IS_SPEC_FUNCTION(fun)) {
-    throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
+    throw new $TypeError(
+        'Object.prototype.__defineGetter__: Expecting function');
   }
   var desc = new PropertyDescriptor();
   desc.setGet(fun);
@@ -345,8 +331,9 @@
 
 
 function ObjectKeys(obj) {
-  if (!IS_SPEC_OBJECT(obj))
+  if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
+  }
   if (%IsJSProxy(obj)) {
     var handler = %GetHandler(obj);
     var names = CallTrap0(handler, "keys", DerivedKeysTrap);
@@ -372,6 +359,7 @@
 
 // ES5 8.10.3.
 function IsGenericDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return false;
   return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
 }
 
@@ -476,7 +464,7 @@
 
 // For Harmony proxies.
 function ToCompletePropertyDescriptor(obj) {
-  var desc = ToPropertyDescriptor(obj)
+  var desc = ToPropertyDescriptor(obj);
   if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
     if (!desc.hasValue()) desc.setValue(void 0);
     if (!desc.hasWritable()) desc.setWritable(false);
@@ -708,7 +696,7 @@
     if (should_throw) {
       throw MakeTypeError("define_disallowed", [p]);
     } else {
-      return;
+      return false;
     }
   }
 
@@ -738,7 +726,7 @@
         if (should_throw) {
           throw MakeTypeError("redefine_disallowed", [p]);
         } else {
-          return;
+          return false;
         }
       }
       // Step 8
@@ -748,7 +736,7 @@
           if (should_throw) {
             throw MakeTypeError("redefine_disallowed", [p]);
           } else {
-            return;
+            return false;
           }
         }
         // Step 10a
@@ -757,7 +745,7 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
           if (!current.isWritable() && desc.hasValue() &&
@@ -765,7 +753,7 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
         }
@@ -775,14 +763,14 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
           if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return;
+              return false;
             }
           }
         }
@@ -860,17 +848,19 @@
 
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
-  if (!IS_SPEC_OBJECT(obj))
+  if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
+  }
   return %GetPrototype(obj);
 }
 
 
 // ES5 section 15.2.3.3
 function ObjectGetOwnPropertyDescriptor(obj, p) {
-  if (!IS_SPEC_OBJECT(obj))
+  if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object",
                         ["getOwnPropertyDescriptor"]);
+  }
   var desc = GetOwnProperty(obj, p);
   return FromPropertyDescriptor(desc);
 }
@@ -883,14 +873,14 @@
   }
   var n = ToUint32(obj.length);
   var array = new $Array(n);
-  var names = {}
+  var names = {};  // TODO(rossberg): use sets once they are ready.
   for (var index = 0; index < n; index++) {
     var s = ToString(obj[index]);
     if (s in names) {
-      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s])
+      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
     }
     array[index] = s;
-    names.s = 0;
+    names[s] = 0;
   }
   return array;
 }
@@ -898,9 +888,10 @@
 
 // ES5 section 15.2.3.4.
 function ObjectGetOwnPropertyNames(obj) {
-  if (!IS_SPEC_OBJECT(obj))
-    throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
-
+  if (!IS_SPEC_OBJECT(obj)) {
+    throw MakeTypeError("obj_ctor_property_non_object",
+                        ["getOwnPropertyNames"]);
+  }
   // Special handling for proxies.
   if (%IsJSProxy(obj)) {
     var handler = %GetHandler(obj);
@@ -917,8 +908,9 @@
   if (%GetInterceptorInfo(obj) & 1) {
     var indexedInterceptorNames =
         %GetIndexedInterceptorElementNames(obj);
-    if (indexedInterceptorNames)
+    if (indexedInterceptorNames) {
       propertyNames = propertyNames.concat(indexedInterceptorNames);
+    }
   }
 
   // Find all the named properties.
@@ -944,8 +936,9 @@
     // We need to check for the exact property value since for intrinsic
     // properties like toString if(propertySet["toString"]) will always
     // succeed.
-    if (propertySet[name] === true)
+    if (propertySet[name] === true) {
       continue;
+    }
     propertySet[name] = true;
     propertyNames[j++] = name;
   }
@@ -1021,14 +1014,17 @@
 
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
-  if (!IS_SPEC_OBJECT(obj))
+  if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
+  }
   var props = ToObject(properties);
   var names = GetOwnEnumerablePropertyNames(props);
+  var descriptors = new InternalArray();
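+  // Convert all descriptors before defining anything (ES5 15.2.3.7):
+  // ToPropertyDescriptor can run user code, which must not observe obj in a
+  // partially updated state.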
   for (var i = 0; i < names.length; i++) {
-    var name = names[i];
-    var desc = ToPropertyDescriptor(props[name]);
-    DefineOwnProperty(obj, name, desc, true);
+    descriptors.push(ToPropertyDescriptor(props[names[i]]));
+  }
+  for (var i = 0; i < names.length; i++) {
+    DefineOwnProperty(obj, names[i], descriptors[i], true);
   }
   return obj;
 }
@@ -1042,12 +1038,20 @@
     throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
   }
 
-  if (IS_SPEC_FUNCTION(obj)) {
+  if (%IsJSFunctionProxy(obj)) {
     var callTrap = %GetCallTrap(obj);
     var constructTrap = %GetConstructTrap(obj);
     var code = DelegateCallAndConstruct(callTrap, constructTrap);
     %Fix(obj);  // becomes a regular function
     %SetCode(obj, code);
+    // TODO(rossberg): What about length and other properties? Not specified.
+    // We just put in some half-reasonable defaults for now.
+    var prototype = new $Object();
+    $Object.defineProperty(prototype, "constructor",
+      {value: obj, writable: true, enumerable: false, configurable: true});
+    // TODO(v8:1530): defineProperty does not handle prototype and length.
+    %FunctionSetPrototype(obj, prototype);
+    obj.length = 0;
   } else {
     %Fix(obj);
   }
@@ -1237,8 +1241,9 @@
 function BooleanValueOf() {
   // NOTE: Both Boolean objects and values can enter here as
   // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
+  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
     throw new $TypeError('Boolean.prototype.valueOf is not generic');
+  }
   return %_ValueOf(this);
 }
 
@@ -1278,8 +1283,9 @@
   // 'this'. This is not as dictated by ECMA-262.
   var number = this;
   if (!IS_NUMBER(this)) {
-    if (!IS_NUMBER_WRAPPER(this))
+    if (!IS_NUMBER_WRAPPER(this)) {
       throw new $TypeError('Number.prototype.toString is not generic');
+    }
     // Get the value of this number in case it's an object.
     number = %_ValueOf(this);
   }
@@ -1312,8 +1318,9 @@
 function NumberValueOf() {
   // NOTE: Both Number objects and values can enter here as
   // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
+  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) {
     throw new $TypeError('Number.prototype.valueOf is not generic');
+  }
   return %_ValueOf(this);
 }
 
@@ -1339,7 +1346,8 @@
   if (!IS_UNDEFINED(fractionDigits)) {
     f = TO_INTEGER(fractionDigits);
     if (f < 0 || f > 20) {
-      throw new $RangeError("toExponential() argument must be between 0 and 20");
+      throw new $RangeError(
+          "toExponential() argument must be between 0 and 20");
     }
   }
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -1383,7 +1391,8 @@
                DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 section 15.7.3.2.
-  %SetProperty($Number, "MIN_VALUE", 5e-324, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %SetProperty($Number, "MIN_VALUE", 5e-324,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 section 15.7.3.3.
   %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1455,53 +1464,54 @@
 // ES5 15.3.4.5
 function FunctionBind(this_arg) { // Length is 1.
   if (!IS_SPEC_FUNCTION(this)) {
-      throw new $TypeError('Bind must be called on a function');
+    throw new $TypeError('Bind must be called on a function');
   }
-  // this_arg is not an argument that should be bound.
-  var argc_bound = (%_ArgumentsLength() || 1) - 1;
-  var fn = this;
-
-  if (argc_bound == 0) {
-    var result = function() {
-      if (%_IsConstructCall()) {
-        // %NewObjectFromBound implicitly uses arguments passed to this
-        // function. We do not pass the arguments object explicitly to avoid
-        // materializing it and guarantee that this function will be optimized.
-        return %NewObjectFromBound(fn, null);
-      }
-      return %Apply(fn, this_arg, arguments, 0, %_ArgumentsLength());
-    };
-  } else {
-    var bound_args = new InternalArray(argc_bound);
-    for(var i = 0; i < argc_bound; i++) {
-      bound_args[i] = %_Arguments(i+1);
+  var boundFunction = function () {
+    // Poison .arguments and .caller, but remain otherwise undetectable.
+    "use strict";
+    // This function must not use any object literals (Object, Array, RegExp),
+    // since the literals-array is being used to store the bound data.
+    if (%_IsConstructCall()) {
+      return %NewObjectFromBound(boundFunction);
     }
+    var bindings = %BoundFunctionGetBindings(boundFunction);
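+    // Layout, as used by the %Apply calls below: bindings[0] is the target
+    // function, bindings[1] the bound receiver, and bindings[2..] the bound
+    // arguments.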
 
-    var result = function() {
-      // If this is a construct call we use a special runtime method
-      // to generate the actual object using the bound function.
-      if (%_IsConstructCall()) {
-        // %NewObjectFromBound implicitly uses arguments passed to this
-        // function. We do not pass the arguments object explicitly to avoid
-        // materializing it and guarantee that this function will be optimized.
-        return %NewObjectFromBound(fn, bound_args);
-      }
+    var argc = %_ArgumentsLength();
+    if (argc == 0) {
+      return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
+    }
+    if (bindings.length === 2) {
+      return %Apply(bindings[0], bindings[1], arguments, 0, argc);
+    }
+    var bound_argc = bindings.length - 2;
+    var argv = new InternalArray(bound_argc + argc);
+    for (var i = 0; i < bound_argc; i++) {
+      argv[i] = bindings[i + 2];
+    }
+    for (var j = 0; j < argc; j++) {
+      argv[i++] = %_Arguments(j);
+    }
+    return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
+  };
 
-      // Combine the args we got from the bind call with the args
-      // given as argument to the invocation.
+  %FunctionRemovePrototype(boundFunction);
+  var new_length = 0;
+  if (%_ClassOf(this) == "Function") {
+    // Function or FunctionProxy.
+    var old_length = this.length;
+    // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
+    if ((typeof old_length === "number") &&
+        ((old_length >>> 0) === old_length)) {
       var argc = %_ArgumentsLength();
-      var args = new InternalArray(argc + argc_bound);
-      // Add bound arguments.
-      for (var i = 0; i < argc_bound; i++) {
-        args[i] = bound_args[i];
-      }
-      // Add arguments from call.
-      for (var i = 0; i < argc; i++) {
-        args[argc_bound + i] = %_Arguments(i);
-      }
-      return %Apply(fn, this_arg, args, 0, argc + argc_bound);
-    };
+      if (argc > 0) argc--;  // Don't count the thisArg as parameter.
+      new_length = old_length - argc;
+      if (new_length < 0) new_length = 0;
+    }
   }
+  // This runtime function finds any remaining arguments on the stack,
+  // so we don't pass the arguments object.
+  var result = %FunctionBindArguments(boundFunction, this,
+                                      this_arg, new_length);
 
   // We already have caller and arguments properties on functions,
   // which are non-configurable. It therefore makes no sense to
@@ -1509,17 +1519,7 @@
   // that bind should make these throw a TypeError if get or set
   // is called and make them non-enumerable and non-configurable.
   // To be consistent with our normal functions we leave this as it is.
-
-  %FunctionRemovePrototype(result);
-  %FunctionSetBound(result);
-  // Set the correct length. If this is a function proxy, this.length might
-  // throw, or return a bogus result. Leave length alone in that case.
-  // TODO(rossberg): This is underspecified in the current proxy proposal.
-  try {
-    var old_length = ToInteger(this.length);
-    var length = (old_length - argc_bound) > 0 ? old_length - argc_bound : 0;
-    %BoundFunctionSetLength(result, length);
-  } catch(x) {}
+  // TODO(lrn): Do set these to be thrower.
   return result;
 }
 
diff --git a/src/v8utils.h b/src/v8utils.h
index aada521..c73222a 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -142,8 +142,14 @@
 }
 
 
-template <typename T>
-static inline void MemsetPointer(T** dest, T* value, int counter) {
+template <typename T, typename U>
+inline void MemsetPointer(T** dest, U* value, int counter) {
+#ifdef DEBUG
+  T* a = NULL;
+  U* b = NULL;
+  a = b;  // Fake assignment to check assignability.
+  USE(a);
+#endif  // DEBUG
 #if defined(V8_HOST_ARCH_IA32)
 #define STOS "stosl"
 #elif defined(V8_HOST_ARCH_X64)
@@ -196,7 +202,7 @@
 
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
-static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
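
A brief sketch of the DEBUG-only assignability check added to MemsetPointer
above (illustrative, self-contained; the real function continues with a
rep-stos fast path or a plain loop):

template <typename T, typename U>
void MemsetPointerExample(T** dest, U* value, int counter) {
#ifdef DEBUG
  // "a = b" compiles only if U* converts to T*, so a caller cannot fill a
  // T* array with pointers of an unrelated type; the check costs nothing
  // at runtime.
  T* a = 0;
  U* b = 0;
  a = b;
  (void) a;
#endif
  for (int i = 0; i < counter; i++) dest[i] = value;
}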
diff --git a/src/variables.cc b/src/variables.cc
index 971061b..aa6a010 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -37,10 +37,11 @@
 // ----------------------------------------------------------------------------
 // Implementation Variable.
 
-const char* Variable::Mode2String(Mode mode) {
+const char* Variable::Mode2String(VariableMode mode) {
   switch (mode) {
     case VAR: return "VAR";
     case CONST: return "CONST";
+    case CONST_HARMONY: return "CONST";
     case LET: return "LET";
     case DYNAMIC: return "DYNAMIC";
     case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -55,21 +56,26 @@
 
 Variable::Variable(Scope* scope,
                    Handle<String> name,
-                   Mode mode,
+                   VariableMode mode,
                    bool is_valid_LHS,
-                   Kind kind)
+                   Kind kind,
+                   InitializationFlag initialization_flag)
   : scope_(scope),
     name_(name),
     mode_(mode),
     kind_(kind),
     location_(UNALLOCATED),
     index_(-1),
+    initializer_position_(RelocInfo::kNoPosition),
     local_if_not_shadowed_(NULL),
     is_valid_LHS_(is_valid_LHS),
-    is_accessed_from_inner_scope_(false),
-    is_used_(false) {
-  // names must be canonicalized for fast equality checks
+    force_context_allocation_(false),
+    is_used_(false),
+    initialization_flag_(initialization_flag) {
+  // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
+  // Var declared variables never need initialization.
+  ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
 
 
@@ -79,4 +85,12 @@
   return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
 }
 
+
+int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
+  int x = (*v)->index();
+  int y = (*w)->index();
+  // Consider sorting them according to type as well?
+  return x - y;
+}
+
 } }  // namespace v8::internal
diff --git a/src/variables.h b/src/variables.h
index 56c8dab..f20bd39 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -40,34 +40,6 @@
 
 class Variable: public ZoneObject {
  public:
-  enum Mode {
-    // User declared variables:
-    VAR,       // declared via 'var', and 'function' declarations
-
-    CONST,     // declared via 'const' declarations
-
-    LET,       // declared via 'let' declarations
-
-    // Variables introduced by the compiler:
-    DYNAMIC,         // always require dynamic lookup (we don't know
-                     // the declaration)
-
-    DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
-                     // variable is global unless it has been shadowed
-                     // by an eval-introduced variable
-
-    DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
-                     // variable is local and where it is unless it
-                     // has been shadowed by an eval-introduced
-                     // variable
-
-    INTERNAL,        // like VAR, but not user-visible (may or may not
-                     // be in a context)
-
-    TEMPORARY        // temporary variables (not user-visible), never
-                     // in a context
-  };
-
   enum Kind {
     NORMAL,
     THIS,
@@ -103,12 +75,13 @@
 
   Variable(Scope* scope,
            Handle<String> name,
-           Mode mode,
+           VariableMode mode,
            bool is_valid_lhs,
-           Kind kind);
+           Kind kind,
+           InitializationFlag initialization_flag);
 
   // Printing support
-  static const char* Mode2String(Mode mode);
+  static const char* Mode2String(VariableMode mode);
 
   bool IsValidLeftHandSide() { return is_valid_LHS_; }
 
@@ -119,17 +92,20 @@
   Scope* scope() const { return scope_; }
 
   Handle<String> name() const { return name_; }
-  Mode mode() const { return mode_; }
-  bool is_accessed_from_inner_scope() const {
-    return is_accessed_from_inner_scope_;
+  VariableMode mode() const { return mode_; }
+  bool has_forced_context_allocation() const {
+    return force_context_allocation_;
   }
-  void MarkAsAccessedFromInnerScope() {
+  void ForceContextAllocation() {
     ASSERT(mode_ != TEMPORARY);
-    is_accessed_from_inner_scope_ = true;
+    force_context_allocation_ = true;
   }
   bool is_used() { return is_used_; }
   void set_is_used(bool flag) { is_used_ = flag; }
 
+  int initializer_position() { return initializer_position_; }
+  void set_initializer_position(int pos) { initializer_position_ = pos; }
+
   bool IsVariable(Handle<String> n) const {
     return !is_this() && name().is_identical_to(n);
   }
@@ -146,6 +122,13 @@
             mode_ == DYNAMIC_GLOBAL ||
             mode_ == DYNAMIC_LOCAL);
   }
+  bool is_const_mode() const {
+    return (mode_ == CONST ||
+            mode_ == CONST_HARMONY);
+  }
+  bool binding_needs_init() const {
+    return initialization_flag_ == kNeedsInitialization;
+  }
 
   bool is_global() const;
   bool is_this() const { return kind_ == THIS; }
@@ -153,8 +136,7 @@
 
   // True if the variable is named eval and not known to be shadowed.
   bool is_possibly_eval() const {
-    return IsVariable(FACTORY->eval_symbol()) &&
-        (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+    return IsVariable(FACTORY->eval_symbol());
   }
 
   Variable* local_if_not_shadowed() const {
@@ -168,28 +150,39 @@
 
   Location location() const { return location_; }
   int index() const { return index_; }
+  InitializationFlag initialization_flag() const {
+    return initialization_flag_;
+  }
 
   void AllocateTo(Location location, int index) {
     location_ = location;
     index_ = index;
   }
 
+  static int CompareIndex(Variable* const* v, Variable* const* w);
+
  private:
   Scope* scope_;
   Handle<String> name_;
-  Mode mode_;
+  VariableMode mode_;
   Kind kind_;
   Location location_;
   int index_;
+  int initializer_position_;
 
+  // If this field is set, this variable resolves to the locally bound
+  // variable stored here, unless that binding is shadowed by variable
+  // bindings introduced by non-strict 'eval' calls between the reference
+  // scope (inclusive) and the binding scope (exclusive).
   Variable* local_if_not_shadowed_;
 
   // Valid as a LHS? (const and this are not valid LHS, for example)
   bool is_valid_LHS_;
 
   // Usage info.
-  bool is_accessed_from_inner_scope_;  // set by variable resolver
+  bool force_context_allocation_;  // set by variable resolver
   bool is_used_;
+  InitializationFlag initialization_flag_;
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index 2c21152..3b85fb4 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     6
-#define BUILD_NUMBER      6
-#define PATCH_LEVEL       19
+#define MINOR_VERSION     7
+#define BUILD_NUMBER      12
+#define PATCH_LEVEL       28
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/weakmap.js b/src/weakmap.js
deleted file mode 100644
index 5fb5151..0000000
--- a/src/weakmap.js
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// const $Object = global.Object;
-const $WeakMap = global.WeakMap;
-
-// -------------------------------------------------------------------
-
-function WeakMapConstructor() {
-  if (%_IsConstructCall()) {
-    %WeakMapInitialize(this);
-  } else {
-    return new $WeakMap();
-  }
-}
-
-
-function WeakMapGet(key) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakMapGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakMapSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return !IS_UNDEFINED(%WeakMapGet(this, key));
-}
-
-
-function WeakMapDelete(key) {
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
-    %WeakMapSet(this, key, void 0);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// -------------------------------------------------------------------
-
-(function () {
-  %CheckIsBootstrapping();
-  // Set up the WeakMap constructor function.
-  %SetCode($WeakMap, WeakMapConstructor);
-
-  // Set up the constructor property on the WeakMap prototype object.
-  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
-  // Set up the non-enumerable functions on the WeakMap prototype object.
-  InstallFunctionsOnHiddenPrototype($WeakMap.prototype, DONT_ENUM, $Array(
-    "get", WeakMapGet,
-    "set", WeakMapSet,
-    "has", WeakMapHas,
-    "delete", WeakMapDelete
-  ));
-})();
diff --git a/src/win32-headers.h b/src/win32-headers.h
index fca5c13..0ee3306 100644
--- a/src/win32-headers.h
+++ b/src/win32-headers.h
@@ -75,6 +75,7 @@
 // makes it impossible to have them elsewhere.
 #include <winsock2.h>
 #include <ws2tcpip.h>
+#include <wspiapi.h>
 #include <process.h>  // for _beginthreadex()
 #include <stdlib.h>
 #endif  // V8_WIN32_HEADERS_FULL
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8db54f0..ab387d6 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -224,7 +224,9 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+                              || rmode_ == EMBEDDED_OBJECT
+                              || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
 }
 
@@ -238,10 +240,15 @@
 }
 
 
-void RelocInfo::set_target_address(Address target) {
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
     Assembler::set_target_address_at(pc_, target);
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
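+    // Record the write so incremental marking does not miss the newly
+    // referenced code object.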
+    if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+          host(), this, HeapObject::cast(target_code));
+    }
   } else {
     Memory::Address_at(pc_) = target;
     CPU::FlushICache(pc_, sizeof(Address));
@@ -277,10 +284,16 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target) {
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  *reinterpret_cast<Object**>(pc_) = target;
+  Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -301,11 +314,19 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
+                                WriteBarrierMode mode) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL) {
+    // TODO(1550): We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -344,6 +365,11 @@
       target;
   CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                    sizeof(Address));
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -368,14 +394,14 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(target_reference_address());
+    visitor->VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
@@ -396,14 +422,14 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(target_reference_address());
+    StaticVisitor::VisitExternalReference(this);
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 745fdae..3290f7e 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,7 +47,7 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -772,7 +772,7 @@
                                           Register dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Use 64-bit mode byte registers.
     emit_rex_64(dst);
   }
@@ -1056,7 +1056,7 @@
 
 void Assembler::decb(Register dst) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
   }
@@ -1384,7 +1384,7 @@
 
 void Assembler::movb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst, src);
   } else {
@@ -1397,7 +1397,7 @@
 
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     emit_rex_32(dst);
   }
   emit(0xB0 + dst.low_bits());
@@ -1407,7 +1407,7 @@
 
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  if (src.code() > 3) {
+  if (!src.is_byte_register()) {
     emit_rex_32(src, dst);
   } else {
     emit_optional_rex_32(src, dst);
@@ -1937,7 +1937,7 @@
   }
   EnsureSpace ensure_space(this);
   ASSERT(is_uint4(cc));
-  if (reg.code() > 3) {  // Use x64 byte registers, where different.
+  if (!reg.is_byte_register()) {  // Use x64 byte registers, where different.
     emit_rex_32(reg);
   }
   emit(0x0F);
@@ -2002,7 +2002,7 @@
     emit(0x84);
     emit_modrm(src, dst);
   } else {
-    if (dst.code() > 3 || src.code() > 3) {
+    if (!dst.is_byte_register() || !src.is_byte_register()) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(dst, src);
     }
@@ -2019,7 +2019,7 @@
     emit(0xA8);
     emit(mask.value_);  // Low byte emitted.
   } else {
-    if (reg.code() > 3) {
+    if (!reg.is_byte_register()) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(reg);
     }
@@ -2042,7 +2042,7 @@
 
 void Assembler::testb(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  if (reg.code() > 3) {
+  if (!reg.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg, op);
   } else {
@@ -2299,6 +2299,13 @@
 }
 
 
+void Assembler::fptan() {
+  EnsureSpace ensure_space(this);
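+  // 0xD9 0xF2 is the x87 FPTAN opcode: it replaces ST(0) with tan(ST(0))
+  // and pushes 1.0, which callers typically pop with fstp.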
+  emit(0xD9);
+  emit(0xF2);
+}
+
+
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
   emit(0xD9);
@@ -2983,7 +2990,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 2e373fa..e3b73f0 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 // A lightweight X64 Assembler.
 
@@ -45,22 +45,22 @@
 // Utility functions
 
 // Test whether a 64-bit value is in a specific range.
-static inline bool is_uint32(int64_t x) {
+inline bool is_uint32(int64_t x) {
   static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
   return static_cast<uint64_t>(x) <= kMaxUInt32;
 }
 
-static inline bool is_int32(int64_t x) {
+inline bool is_int32(int64_t x) {
   static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
   return is_uint32(x - kMinInt32);
 }
 
-static inline bool uint_is_int32(uint64_t x) {
+inline bool uint_is_int32(uint64_t x) {
   static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
   return x <= kMaxInt32;
 }
 
-static inline bool is_uint32(uint64_t x) {
+inline bool is_uint32(uint64_t x) {
   static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
   return x <= kMaxUInt32;
 }
@@ -131,6 +131,8 @@
   }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
+  // rax, rbx, rcx and rdx are byte registers, the rest are not.
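+  // (Without a REX prefix, byte-register encodings 4-7 address the legacy
+  // high registers ah, ch, dh and bh instead of spl, bpl, sil and dil;
+  // that is why byte instructions on other registers emit REX.)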
+  bool is_byte_register() const { return code_ <= 3; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -215,6 +217,12 @@
     return names[index];
   }
 
+  static XMMRegister from_code(int code) {
+    ASSERT(code >= 0);
+    ASSERT(code < kNumRegisters);
+    XMMRegister r = { code };
+    return r;
+  }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
@@ -643,7 +651,6 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
-  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -735,6 +742,10 @@
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
+  void addl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x01, src, dst);
+  }
+
   void addq(Register dst, Register src) {
     arithmetic_op(0x03, dst, src);
   }
@@ -1266,6 +1277,7 @@
 
   void fsin();
   void fcos();
+  void fptan();
   void fyl2x();
 
   void frndint();
@@ -1394,13 +1406,14 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index db06909..e423ae3 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -79,12 +79,12 @@
   //  -- rdi: constructor function
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that function is not a smi.
   __ JumpIfSmi(rdi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &non_function_call);
+  __ j(not_equal, &slow);
 
   // Jump to the function-specific construct stub.
   __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -94,10 +94,19 @@
 
   // rdi: called object
   // rax: number of arguments
+  // rcx: object map
+  Label do_call;
+  __ bind(&slow);
+  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function_call);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // Set expected number of arguments to zero (not changing rax).
   __ Set(rbx, 0);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ SetCallKind(rcx, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
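
The dispatch above grows from two cases to three: a JSFunction jumps to its function-specific construct stub, a JS function proxy is routed to CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, and anything else falls through to CALL_NON_FUNCTION_AS_CONSTRUCTOR, with both builtin paths sharing the adaptor tail call. An illustrative rendering of the branch structure (the real stub loads builtin entry points, not strings):

#include <cstdio>

enum class TargetKind { kJSFunction, kJSFunctionProxy, kOther };

const char* ConstructDispatch(TargetKind kind) {
  switch (kind) {
    case TargetKind::kJSFunction:
      return "jump to the function-specific construct stub";
    case TargetKind::kJSFunctionProxy:
      return "CALL_FUNCTION_PROXY_AS_CONSTRUCTOR via the arguments adaptor";
    default:
      return "CALL_NON_FUNCTION_AS_CONSTRUCTOR via the arguments adaptor";
  }
}

int main() {
  std::printf("%s\n", ConstructDispatch(TargetKind::kJSFunctionProxy));
  return 0;
}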
@@ -110,273 +119,279 @@
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
-    // Enter a construct frame.
-  __ EnterConstructFrame();
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Store a smi-tagged arguments count on the stack.
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+    // Store a smi-tagged arguments count on the stack.
+    __ Integer32ToSmi(rax, rax);
+    __ push(rax);
 
-  // Push the function to invoke on the stack.
-  __ push(rdi);
+    // Push the function to invoke on the stack.
+    __ push(rdi);
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ movq(kScratchRegister, debug_step_in_fp);
-    __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
-    __ j(not_equal, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(masm->isolate());
+      __ movq(kScratchRegister, debug_step_in_fp);
+      __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+      __ j(not_equal, &rt_call);
 #endif
 
-    // Verified that the constructor is a JSFunction.
-    // Load the initial map and verify that it is in fact a map.
-    // rdi: constructor
-    __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi
-    STATIC_ASSERT(kSmiTag == 0);
-    __ JumpIfSmi(rax, &rt_call);
-    // rdi: constructor
-    // rax: initial map (if proven valid below)
-    __ CmpObjectType(rax, MAP_TYPE, rbx);
-    __ j(not_equal, &rt_call);
+      // Verified that the constructor is a JSFunction.
+      // Load the initial map and verify that it is in fact a map.
+      // rdi: constructor
+      __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+      // Will both indicate a NULL and a Smi
+      ASSERT(kSmiTag == 0);
+      __ JumpIfSmi(rax, &rt_call);
+      // rdi: constructor
+      // rax: initial map (if proven valid below)
+      __ CmpObjectType(rax, MAP_TYPE, rbx);
+      __ j(not_equal, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // rdi: constructor
-    // rax: initial map
-    __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
-    __ j(equal, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // rdi: constructor
+      // rax: initial map
+      __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+      __ j(equal, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-      __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
-      __ j(not_zero, &allocate);
-
-      __ push(rax);
-      __ push(rdi);
-
-      __ push(rdi);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(rdi);
-      __ pop(rax);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
-    __ shl(rdi, Immediate(kPointerSizeLog2));
-    // rdi: size of new object
-    __ AllocateInNewSpace(rdi,
-                          rbx,
-                          rdi,
-                          no_reg,
-                          &rt_call,
-                          NO_ALLOCATION_FLAGS);
-    // Allocated the JSObject, now initialize the fields.
-    // rax: initial map
-    // rbx: JSObject (not HeapObject tagged - the actual address).
-    // rdi: start of next object
-    __ movq(Operand(rbx, JSObject::kMapOffset), rax);
-    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-    __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
-    __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
-    // Set extra fields in the newly allocated object.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    { Label loop, entry;
-      // To allow for truncation.
       if (count_constructions) {
-        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+        Label allocate;
+        // Decrease generous allocation count.
+        __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+        __ decb(FieldOperand(rcx,
+                             SharedFunctionInfo::kConstructionCountOffset));
+        __ j(not_zero, &allocate);
+
+        __ push(rax);
+        __ push(rdi);
+
+        __ push(rdi);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(rdi);
+        __ pop(rax);
+
+        __ bind(&allocate);
       }
+
+      // Now allocate the JSObject on the heap.
+      __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+      __ shl(rdi, Immediate(kPointerSizeLog2));
+      // rdi: size of new object
+      __ AllocateInNewSpace(rdi,
+                            rbx,
+                            rdi,
+                            no_reg,
+                            &rt_call,
+                            NO_ALLOCATION_FLAGS);
+      // Allocated the JSObject, now initialize the fields.
+      // rax: initial map
+      // rbx: JSObject (not HeapObject tagged - the actual address).
+      // rdi: start of next object
+      __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+      __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+      __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+      __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+      // Set extra fields in the newly allocated object.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
       __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ movq(Operand(rcx, 0), rdx);
-      __ addq(rcx, Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmpq(rcx, rdi);
-      __ j(less, &loop);
-    }
-
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    __ or_(rbx, Immediate(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed.
-    // Allocate and initialize a FixedArray if it is.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    // Calculate total properties described map.
-    __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
-    __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
-    __ addq(rdx, rcx);
-    // Calculate unused properties past the end of the in-object properties.
-    __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
-    __ subq(rdx, rcx);
-    // Done if no extra properties are to be allocated.
-    __ j(zero, &allocated);
-    __ Assert(positive, "Property allocation count failed.");
-
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // rbx: JSObject
-    // rdi: start of next object (will be start of FixedArray)
-    // rdx: number of elements in properties array
-    __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          rdx,
-                          rdi,
-                          rax,
-                          no_reg,
-                          &undo_allocation,
-                          RESULT_CONTAINS_TOP);
-
-    // Initialize the FixedArray.
-    // rbx: JSObject
-    // rdi: FixedArray
-    // rdx: number of elements
-    // rax: start of next object
-    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
-    __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
-    __ Integer32ToSmi(rdx, rdx);
-    __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
-
-    // Initialize the fields to undefined.
-    // rbx: JSObject
-    // rdi: FixedArray
-    // rax: start of next object
-    // rdx: number of elements
-    { Label loop, entry;
       __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-      __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ movq(Operand(rcx, 0), rdx);
-      __ addq(rcx, Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmpq(rcx, rax);
-      __ j(below, &loop);
+      if (count_constructions) {
+        __ movzxbq(rsi,
+                   FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+        __ lea(rsi,
+               Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
+        // rsi: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmpq(rsi, rdi);
+          __ Assert(less_equal,
+                    "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+      }
+      __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
+      __ or_(rbx, Immediate(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed.
+      // Allocate and initialize a FixedArray if it is.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
+      // Calculate the total number of properties described by the map.
+      __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+      __ movzxbq(rcx,
+                 FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+      __ addq(rdx, rcx);
+      // Calculate unused properties past the end of the in-object properties.
+      __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+      __ subq(rdx, rcx);
+      // Done if no extra properties are to be allocated.
+      __ j(zero, &allocated);
+      __ Assert(positive, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // rbx: JSObject
+      // rdi: start of next object (will be start of FixedArray)
+      // rdx: number of elements in properties array
+      __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                            times_pointer_size,
+                            rdx,
+                            rdi,
+                            rax,
+                            no_reg,
+                            &undo_allocation,
+                            RESULT_CONTAINS_TOP);
+
+      // Initialize the FixedArray.
+      // rbx: JSObject
+      // rdi: FixedArray
+      // rdx: number of elements
+      // rax: start of next object
+      __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+      __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // set up the map
+      __ Integer32ToSmi(rdx, rdx);
+      __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
+
+      // Initialize the fields to undefined.
+      // rbx: JSObject
+      // rdi: FixedArray
+      // rax: start of next object
+      // rdx: number of elements
+      { Label loop, entry;
+        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+        __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ movq(Operand(rcx, 0), rdx);
+        __ addq(rcx, Immediate(kPointerSize));
+        __ bind(&entry);
+        __ cmpq(rcx, rax);
+        __ j(below, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // rbx: JSObject
+      // rdi: FixedArray
+      __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
+      __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+      // Continue with JSObject being successfully allocated
+      // rbx: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // rbx: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(rbx);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // rbx: JSObject
-    // rdi: FixedArray
-    __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
-    __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+    // Allocate the new receiver object using the runtime call.
+    // rdi: function (constructor)
+    __ bind(&rt_call);
+    // Must restore rdi (constructor) before calling runtime.
+    __ movq(rdi, Operand(rsp, 0));
+    __ push(rdi);
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ movq(rbx, rax);  // store result in rbx
 
+    // New object allocated.
+    // rbx: newly allocated object
+    __ bind(&allocated);
+    // Retrieve the function from the stack.
+    __ pop(rdi);
 
-    // Continue with JSObject being successfully allocated
-    // rbx: JSObject
-    __ jmp(&allocated);
+    // Retrieve smi-tagged arguments count from the stack.
+    __ movq(rax, Operand(rsp, 0));
+    __ SmiToInteger32(rax, rax);
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // rbx: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(rbx);
+    // Push the allocated receiver to the stack. We need two copies
+    // because we may have to return the original one and the calling
+    // conventions dictate that the called function pops the receiver.
+    __ push(rbx);
+    __ push(rbx);
+
+    // Set up the pointer to the last argument.
+    __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ movq(rcx, rax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(rbx, rcx, times_pointer_size, 0));
+    __ bind(&entry);
+    __ decq(rcx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    if (is_api_function) {
+      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(rax);
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Restore context from the frame.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    __ JumpIfSmi(rax, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ movq(rax, Operand(rsp, 0));
+
+    // Restore the arguments count and leave the construct frame.
+    __ bind(&exit);
+    __ movq(rbx, Operand(rsp, kPointerSize));  // Get arguments count.
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  // rdi: function (constructor)
-  __ bind(&rt_call);
-  // Must restore rdi (constructor) before calling runtime.
-  __ movq(rdi, Operand(rsp, 0));
-  __ push(rdi);
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ movq(rbx, rax);  // store result in rbx
-
-  // New object allocated.
-  // rbx: newly allocated object
-  __ bind(&allocated);
-  // Retrieve the function from the stack.
-  __ pop(rdi);
-
-  // Retrieve smi-tagged arguments count from the stack.
-  __ movq(rax, Operand(rsp, 0));
-  __ SmiToInteger32(rax, rax);
-
-  // Push the allocated receiver to the stack. We need two copies
-  // because we may have to return the original one and the calling
-  // conventions dictate that the called function pops the receiver.
-  __ push(rbx);
-  __ push(rbx);
-
-  // Setup pointer to last argument.
-  __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
-  // Copy arguments and receiver to the expression stack.
-  Label loop, entry;
-  __ movq(rcx, rax);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ push(Operand(rbx, rcx, times_pointer_size, 0));
-  __ bind(&entry);
-  __ decq(rcx);
-  __ j(greater_equal, &loop);
-
-  // Call the function.
-  if (is_api_function) {
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(rax);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Restore context from the frame.
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ JumpIfSmi(rax, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
-  __ j(above_equal, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ movq(rax, Operand(rsp, 0));
-
-  // Restore the arguments count and leave the construct frame.
-  __ bind(&exit);
-  __ movq(rbx, Operand(rsp, kPointerSize));  // get arguments count
-  __ LeaveConstructFrame();
-
   // Remove caller arguments from the stack and return.
   __ pop(rcx);
   SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
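
The bulk of the hunk above is re-indentation: the explicit EnterConstructFrame()/LeaveConstructFrame() pair is replaced by a block-scoped FrameScope, so frame teardown is tied to the end of a C++ scope rather than a manually matched call. A reduced sketch of the RAII shape (names are illustrative, not V8's actual interface):

#include <cstdio>

struct Masm {
  void EnterFrame(const char* kind) { std::printf("enter %s frame\n", kind); }
  void LeaveFrame(const char* kind) { std::printf("leave %s frame\n", kind); }
};

// Frame entry is emitted in the constructor and frame exit in the destructor,
// so no code path out of the block can skip the teardown sequence.
class ScopedFrame {
 public:
  ScopedFrame(Masm* masm, const char* kind) : masm_(masm), kind_(kind) {
    masm_->EnterFrame(kind_);
  }
  ~ScopedFrame() { masm_->LeaveFrame(kind_); }
 private:
  Masm* masm_;
  const char* kind_;
};

int main() {
  Masm masm;
  {
    ScopedFrame scope(&masm, "construct");
    // ... code generated inside the construct frame ...
  }  // teardown emitted here, mirroring the indented blocks in the diff
  return 0;
}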
@@ -413,104 +428,108 @@
   // - Object*** argv
   // (see Handle::Invoke in execution.cc).
 
-  // Platform specific argument handling. After this, the stack contains
-  // an internal frame and the pushed function and receiver, and
-  // register rax and rbx holds the argument count and argument array,
-  // while rdi holds the function pointer and rsi the context.
+  // Open a C++ scope for the FrameScope.
+  {
+    // Platform specific argument handling. After this, the stack contains
+    // an internal frame and the pushed function and receiver, and
+    // registers rax and rbx hold the argument count and argument array,
+    // while rdi holds the function pointer and rsi the context.
+
 #ifdef _WIN64
-  // MSVC parameters in:
-  // rcx : entry (ignored)
-  // rdx : function
-  // r8 : receiver
-  // r9 : argc
-  // [rsp+0x20] : argv
+    // MSVC parameters in:
+    // rcx : entry (ignored)
+    // rdx : function
+    // r8 : receiver
+    // r9 : argc
+    // [rsp+0x20] : argv
 
-  // Clear the context before we push it when entering the JS frame.
-  __ Set(rsi, 0);
-  __ EnterInternalFrame();
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Load the function context into rsi.
-  __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+    // Load the function context into rsi.
+    __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
 
-  // Push the function and the receiver onto the stack.
-  __ push(rdx);
-  __ push(r8);
+    // Push the function and the receiver onto the stack.
+    __ push(rdx);
+    __ push(r8);
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ movq(rax, r9);
-  // Load the previous frame pointer to access C argument on stack
-  __ movq(kScratchRegister, Operand(rbp, 0));
-  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
-  // Load the function pointer into rdi.
-  __ movq(rdi, rdx);
+    // Load the number of arguments and set up the pointer to the arguments.
+    __ movq(rax, r9);
+    // Load the previous frame pointer to access C arguments on the stack.
+    __ movq(kScratchRegister, Operand(rbp, 0));
+    __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+    // Load the function pointer into rdi.
+    __ movq(rdi, rdx);
 #else  // _WIN64
-  // GCC parameters in:
-  // rdi : entry (ignored)
-  // rsi : function
-  // rdx : receiver
-  // rcx : argc
-  // r8  : argv
+    // GCC parameters in:
+    // rdi : entry (ignored)
+    // rsi : function
+    // rdx : receiver
+    // rcx : argc
+    // r8  : argv
 
-  __ movq(rdi, rsi);
-  // rdi : function
+    __ movq(rdi, rsi);
+    // rdi : function
 
-  // Clear the context before we push it when entering the JS frame.
-  __ Set(rsi, 0);
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the function and receiver and setup the context.
-  __ push(rdi);
-  __ push(rdx);
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    // Push the function and receiver and set up the context.
+    __ push(rdi);
+    __ push(rdx);
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ movq(rax, rcx);
-  __ movq(rbx, r8);
+    // Load the number of arguments and set up the pointer to the arguments.
+    __ movq(rax, rcx);
+    __ movq(rbx, r8);
 #endif  // _WIN64
 
-  // Current stack contents:
-  // [rsp + 2 * kPointerSize ... ]: Internal frame
-  // [rsp + kPointerSize]         : function
-  // [rsp]                        : receiver
-  // Current register contents:
-  // rax : argc
-  // rbx : argv
-  // rsi : context
-  // rdi : function
+    // Current stack contents:
+    // [rsp + 2 * kPointerSize ... ]: Internal frame
+    // [rsp + kPointerSize]         : function
+    // [rsp]                        : receiver
+    // Current register contents:
+    // rax : argc
+    // rbx : argv
+    // rsi : context
+    // rdi : function
 
-  // Copy arguments to the stack in a loop.
-  // Register rbx points to array of pointers to handle locations.
-  // Push the values of these handles.
-  Label loop, entry;
-  __ Set(rcx, 0);  // Set loop variable to 0.
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
-  __ push(Operand(kScratchRegister, 0));  // dereference handle
-  __ addq(rcx, Immediate(1));
-  __ bind(&entry);
-  __ cmpq(rcx, rax);
-  __ j(not_equal, &loop);
+    // Copy arguments to the stack in a loop.
+    // Register rbx points to array of pointers to handle locations.
+    // Push the values of these handles.
+    Label loop, entry;
+    __ Set(rcx, 0);  // Set loop variable to 0.
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+    __ push(Operand(kScratchRegister, 0));  // dereference handle
+    __ addq(rcx, Immediate(1));
+    __ bind(&entry);
+    __ cmpq(rcx, rax);
+    __ j(not_equal, &loop);
 
-  // Invoke the code.
-  if (is_construct) {
-    // Expects rdi to hold function pointer.
-    __ Call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
-  } else {
-    ParameterCount actual(rax);
-    // Function must be in rdi.
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code.
+    if (is_construct) {
+      // Expects rdi to hold function pointer.
+      __ Call(masm->isolate()->builtins()->JSConstructCall(),
+              RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(rax);
+      // Function must be in rdi.
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
   }
 
-  // Exit the JS frame. Notice that this also removes the empty
-  // context and the function left on the stack by the code
-  // invocation.
-  __ LeaveInternalFrame();
   // TODO(X64): Is argument correct? Is there a receiver to remove?
-  __ ret(1 * kPointerSize);  // remove receiver
+  __ ret(1 * kPointerSize);  // Remove receiver.
 }
 
 
@@ -526,23 +545,24 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(rdi);
-  // Push call kind information.
-  __ push(rcx);
+    // Push a copy of the function onto the stack.
+    __ push(rdi);
+    // Push call kind information.
+    __ push(rcx);
 
-  __ push(rdi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
+    __ push(rdi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
 
-  // Restore call kind information.
-  __ pop(rcx);
-  // Restore receiver.
-  __ pop(rdi);
+    // Restore call kind information.
+    __ pop(rcx);
+    // Restore receiver.
+    __ pop(rdi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -552,23 +572,24 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(rdi);
-  // Push call kind information.
-  __ push(rcx);
+    // Push a copy of the function onto the stack.
+    __ push(rdi);
+    // Push call kind information.
+    __ push(rcx);
 
-  __ push(rdi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
+    __ push(rdi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore call kind information.
-  __ pop(rcx);
-  // Restore function.
-  __ pop(rdi);
+    // Restore call kind information.
+    __ pop(rcx);
+    // Restore function.
+    __ pop(rdi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -579,14 +600,15 @@
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Pass the deoptimization type to the runtime system.
-  __ Push(Smi::FromInt(static_cast<int>(type)));
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
 
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    // Tear down internal frame.
+  }
 
   // Get the full codegen state from the stack and untag it.
   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -623,9 +645,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ Pushad();
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ Popad();
   __ ret(0);
 }
@@ -647,7 +670,7 @@
     __ testq(rax, rax);
     __ j(not_zero, &done);
     __ pop(rbx);
-    __ Push(FACTORY->undefined_value());
+    __ Push(masm->isolate()->factory()->undefined_value());
     __ push(rbx);
     __ incq(rax);
     __ bind(&done);
@@ -695,18 +718,21 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ Integer32ToSmi(rax, rax);
-    __ push(rax);
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Integer32ToSmi(rax, rax);
+      __ push(rax);
 
-    __ push(rbx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ movq(rbx, rax);
-    __ Set(rdx, 0);  // indicate regular JS_FUNCTION
+      __ push(rbx);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ movq(rbx, rax);
+      __ Set(rdx, 0);  // Indicate a regular JS_FUNCTION.
 
-    __ pop(rax);
-    __ SmiToInteger32(rax, rax);
-    __ LeaveInternalFrame();
+      __ pop(rax);
+      __ SmiToInteger32(rax, rax);
+    }
+
     // Restore the function to rdi.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
     __ jmp(&patch_receiver, Label::kNear);
@@ -807,168 +833,166 @@
   //  rsp+8: arguments
   // rsp+16: receiver ("this")
   // rsp+24: function
-  __ EnterInternalFrame();
-  // Stack frame:
-  //    rbp: Old base pointer
-  // rbp[1]: return address
-  // rbp[2]: function arguments
-  // rbp[3]: receiver
-  // rbp[4]: function
-  static const int kArgumentsOffset = 2 * kPointerSize;
-  static const int kReceiverOffset = 3 * kPointerSize;
-  static const int kFunctionOffset = 4 * kPointerSize;
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Stack frame:
+    //    rbp: Old base pointer
+    // rbp[1]: return address
+    // rbp[2]: function arguments
+    // rbp[3]: receiver
+    // rbp[4]: function
+    static const int kArgumentsOffset = 2 * kPointerSize;
+    static const int kReceiverOffset = 3 * kPointerSize;
+    static const int kFunctionOffset = 4 * kPointerSize;
 
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(Operand(rbp, kArgumentsOffset));
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(Operand(rbp, kArgumentsOffset));
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
-  __ movq(rcx, rsp);
-  // Make rcx the space we have left. The stack might already be overflowed
-  // here which will cause rcx to become negative.
-  __ subq(rcx, kScratchRegister);
-  // Make rdx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-  // Check if the arguments will overflow the stack.
-  __ cmpq(rcx, rdx);
-  __ j(greater, &okay);  // Signed comparison.
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+    __ movq(rcx, rsp);
+    // Make rcx the space we have left. The stack might already be overflowed
+    // here, which will cause rcx to become negative.
+    __ subq(rcx, kScratchRegister);
+    // Make rdx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmpq(rcx, rdx);
+    __ j(greater, &okay);  // Signed comparison.
 
-  // Out of stack space.
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(rax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
+    // Out of stack space.
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(rax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+    // End of stack check.
 
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(rax);  // limit
-  __ push(Immediate(0));  // index
+    // Push current index and limit.
+    const int kLimitOffset =
+        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+    __ push(rax);  // limit
+    __ push(Immediate(0));  // index
 
-  // Get the receiver.
-  __ movq(rbx, Operand(rbp, kReceiverOffset));
+    // Get the receiver.
+    __ movq(rbx, Operand(rbp, kReceiverOffset));
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &push_receiver);
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ movq(rdi, Operand(rbp, kFunctionOffset));
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(not_equal, &push_receiver);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-  __ j(not_equal, &push_receiver);
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ j(not_equal, &push_receiver);
 
-  // Do not transform the receiver for natives.
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-  __ j(not_equal, &push_receiver);
+    // Do not transform the receiver for natives.
+    __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ j(not_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
-  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-  __ j(equal, &use_global_receiver);
-  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &use_global_receiver);
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+    __ j(equal, &use_global_receiver);
+    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, &use_global_receiver);
 
-  // If given receiver is already a JavaScript object then there's no
-  // reason for converting it.
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
-  __ j(above_equal, &push_receiver);
+    // If given receiver is already a JavaScript object then there's no
+    // reason for converting it.
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &push_receiver);
 
-  // Convert the receiver to an object.
-  __ bind(&call_to_object);
-  __ push(rbx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ movq(rbx, rax);
-  __ jmp(&push_receiver, Label::kNear);
+    // Convert the receiver to an object.
+    __ bind(&call_to_object);
+    __ push(rbx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ movq(rbx, rax);
+    __ jmp(&push_receiver, Label::kNear);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
-  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+    __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(rbx);
+    // Push the receiver.
+    __ bind(&push_receiver);
+    __ push(rbx);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ movq(rax, Operand(rbp, kIndexOffset));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(rdx, Operand(rbp, kArgumentsOffset));  // Load the arguments.
 
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
+    // Use inline caching to speed up access to arguments.
+    Handle<Code> ic =
+        masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // It is important that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to indicate that
+    // we have generated an inline version of the keyed load.  In this
+    // case, we know that we are not generating a test instruction next.
 
-  // Push the nth argument.
-  __ push(rax);
+    // Push the nth argument.
+    __ push(rax);
 
-  // Update the index on the stack and in register rax.
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  __ movq(Operand(rbp, kIndexOffset), rax);
+    // Update the index on the stack and in register rax.
+    __ movq(rax, Operand(rbp, kIndexOffset));
+    __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+    __ movq(Operand(rbp, kIndexOffset), rax);
 
-  __ bind(&entry);
-  __ cmpq(rax, Operand(rbp, kLimitOffset));
-  __ j(not_equal, &loop);
+    __ bind(&entry);
+    __ cmpq(rax, Operand(rbp, kLimitOffset));
+    __ j(not_equal, &loop);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(rax);
-  __ SmiToInteger32(rax, rax);
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &call_proxy);
-  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(rax);
+    __ SmiToInteger32(rax, rax);
+    __ movq(rdi, Operand(rbp, kFunctionOffset));
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(not_equal, &call_proxy);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+    frame_scope.GenerateLeaveFrame();
+    __ ret(3 * kPointerSize);  // Remove this, receiver, and arguments.
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(rdi);  // add function proxy as last argument
-  __ incq(rax);
-  __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_METHOD);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
-  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(rdi);  // Add the function proxy as the last argument.
+    __ incq(rax);
+    __ Set(rbx, 0);
+    __ SetCallKind(rcx, CALL_AS_METHOD);
+    __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  __ LeaveInternalFrame();
+    // Leave internal frame.
+  }
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
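
The stack check near the top of Generate_FunctionApply is easy to misread in assembler form. In scalar terms: headroom = rsp - real_stack_limit is computed as a signed value, so a stack that has already grown past the limit produces negative headroom and fails the signed 'greater' comparison against argc * kPointerSize. A compilable restatement:

#include <cassert>
#include <cstdint>

// Signed compare, as in the asm, so an already-overflowed stack also fails.
bool ArgumentsFitOnStack(uint64_t rsp, uint64_t real_stack_limit,
                         int64_t argc) {
  const int64_t kPointerSize = 8;
  int64_t headroom =
      static_cast<int64_t>(rsp) - static_cast<int64_t>(real_stack_limit);
  return headroom > argc * kPointerSize;
}

int main() {
  assert(ArgumentsFitOnStack(0x8000, 0x4000, 100));  // plenty of room
  assert(!ArgumentsFitOnStack(0x4000, 0x8000, 1));   // already overflowed
  return 0;
}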
 
 
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. If the parameter initial_capacity is larger than zero an elements
 // backing store is allocated with this size and filled with the hole values.
@@ -979,9 +1003,9 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(initial_capacity >= 0);
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  STATIC_ASSERT(initial_capacity >= 0);
 
   // Load the initial map from the array function.
   __ movq(scratch1, FieldOperand(array_function,
@@ -1005,9 +1029,10 @@
   // result: JSObject
   // scratch1: initial map
   // scratch2: start of next object
+  Factory* factory = masm->isolate()->factory();
   __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
   __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
-          FACTORY->empty_fixed_array());
+          factory->empty_fixed_array());
   // Field JSArray::kElementsOffset is initialized later.
   __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
 
@@ -1015,7 +1040,7 @@
   // fixed array.
   if (initial_capacity == 0) {
     __ Move(FieldOperand(result, JSArray::kElementsOffset),
-            FACTORY->empty_fixed_array());
+            factory->empty_fixed_array());
     return;
   }
 
@@ -1032,15 +1057,14 @@
   // scratch1: elements array
   // scratch2: start of next object
   __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
-          FACTORY->fixed_array_map());
+          factory->fixed_array_map());
   __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
           Smi::FromInt(initial_capacity));
 
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
-  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
-  __ Move(scratch3, FACTORY->the_hole_value());
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -1051,13 +1075,17 @@
     }
   } else {
     Label loop, entry;
+    __ movq(scratch2, Immediate(initial_capacity));
     __ jmp(&entry);
     __ bind(&loop);
-    __ movq(Operand(scratch1, 0), scratch3);
-    __ addq(scratch1, Immediate(kPointerSize));
+    __ movq(FieldOperand(scratch1,
+                         scratch2,
+                         times_pointer_size,
+                         FixedArray::kHeaderSize),
+            scratch3);
     __ bind(&entry);
-    __ cmpq(scratch1, scratch2);
-    __ j(below, &loop);
+    __ decq(scratch2);
+    __ j(not_sign, &loop);
   }
 }
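
Two things change in the fill loop above: the hole value comes from a root-relative LoadRoot instead of an embedded FACTORY handle, and the loop counts an index register down from initial_capacity instead of walking a pointer upward. In scalar form the loop is (decrement first, exit when the index goes negative, i.e. 'not_sign' fails):

void FillWithHole(void** slots, int capacity, void* hole_value) {
  // Mirrors the asm: slots capacity-1 .. 0 are each written exactly once.
  for (int i = capacity - 1; i >= 0; --i) {
    slots[i] = hole_value;
  }
}

int main() {
  void* slots[5] = {nullptr};
  int hole = 0;  // stand-in for the the-hole sentinel object
  FillWithHole(slots, 5, &hole);
  return (slots[0] == &hole && slots[4] == &hole) ? 0 : 1;
}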
 
@@ -1073,38 +1101,25 @@
 // register elements_array is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array,
                             Register elements_array_end,
                             Register scratch,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
-
   // Load the initial map from the array function.
   __ movq(elements_array,
           FieldOperand(array_function,
                        JSFunction::kPrototypeOrInitialMapOffset));
 
-  // Check whether an empty sized array is requested.
-  __ testq(array_size, array_size);
-  __ j(not_zero, &not_empty);
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch,
-                        gc_required,
-                        TAG_OBJECT);
-  __ jmp(&allocated);
+  if (FLAG_debug_code) {  // Assert that array size is not zero.
+    __ testq(array_size, array_size);
+    __ Assert(not_zero, "array size is unexpectedly 0");
+  }
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
-  __ bind(&not_empty);
   SmiIndex index =
       masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
@@ -1122,9 +1137,9 @@
   // elements_array: initial map
   // elements_array_end: start of next object
   // array_size: size of array (smi)
-  __ bind(&allocated);
+  Factory* factory = masm->isolate()->factory();
   __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
-  __ Move(elements_array, FACTORY->empty_fixed_array());
+  __ Move(elements_array, factory->empty_fixed_array());
   __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
   // Field JSArray::kElementsOffset is initialized later.
   __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1143,16 +1158,7 @@
   // elements_array_end: start of next object
   // array_size: size of array (smi)
   __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
-          FACTORY->fixed_array_map());
-  Label not_empty_2, fill_array;
-  __ SmiTest(array_size);
-  __ j(not_zero, &not_empty_2);
-  // Length of the FixedArray is the number of pre-allocated elements even
-  // though the actual JSArray has length 0.
-  __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
-          Smi::FromInt(kPreallocatedArrayElements));
-  __ jmp(&fill_array);
-  __ bind(&not_empty_2);
+          factory->fixed_array_map());
   // For non-empty JSArrays the length of the FixedArray and the JSArray is the
   // same.
   __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -1161,10 +1167,9 @@
   // result: JSObject
   // elements_array: elements array
   // elements_array_end: start of next object
-  __ bind(&fill_array);
   if (fill_with_hole) {
     Label loop, entry;
-    __ Move(scratch, FACTORY->the_hole_value());
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
     __ lea(elements_array, Operand(elements_array,
                                    FixedArray::kHeaderSize - kHeapObjectTag));
     __ jmp(&entry);
@@ -1194,12 +1199,13 @@
 // a construct call and a normal call.
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label *call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more;
+  Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array;
 
   // Check for array construction with zero arguments.
   __ testq(rax, rax);
   __ j(not_zero, &argc_one_or_more);
 
+  __ bind(&empty_array);
   // Handle construction of an empty array.
   AllocateEmptyJSArray(masm,
                        rdi,
@@ -1207,7 +1213,6 @@
                        rcx,
                        rdx,
                        r8,
-                       kPreallocatedArrayElements,
                        call_generic_code);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->array_function_native(), 1);
@@ -1220,6 +1225,16 @@
   __ cmpq(rax, Immediate(1));
   __ j(not_equal, &argc_two_or_more);
   __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
+
+  __ SmiTest(rdx);
+  __ j(not_zero, &not_empty_array);
+  __ pop(r8);  // Adjust stack.
+  __ Drop(1);
+  __ push(r8);
+  __ movq(rax, Immediate(0));  // Treat this as a call with argc of zero.
+  __ jmp(&empty_array);
+
+  __ bind(&not_empty_array);
   __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
 
   // Handle construction of an empty array of a certain size. Bail out if size
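
With the preallocated-empty-array machinery gone from AllocateJSArray, the hunk above reroutes `new Array(0)`: the single smi argument is dropped from the stack, argc is forced to zero, and control rejoins the empty-array path. A sketch of the resulting single-argument classification (the two-or-more-argument path is handled separately and omitted):

#include <cassert>

enum class ArrayPath { kEmpty, kSizedEmpty, kGeneric };

ArrayPath ClassifySingleArgument(bool is_non_negative_smi, int value) {
  if (!is_non_negative_smi) return ArrayPath::kGeneric;  // bail to generic code
  if (value == 0) return ArrayPath::kEmpty;  // new fast path: drop the arg
  return ArrayPath::kSizedEmpty;             // allocate 'value' hole slots
}

int main() {
  assert(ClassifySingleArgument(true, 0) == ArrayPath::kEmpty);
  assert(ClassifySingleArgument(true, 7) == ArrayPath::kSizedEmpty);
  assert(ClassifySingleArgument(false, 0) == ArrayPath::kGeneric);
  return 0;
}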
@@ -1520,10 +1535,11 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  __ EnterInternalFrame();
-  __ push(rax);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1541,7 +1557,9 @@
 
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  __ Abort("Unreachable code: returned from tail call.");
+  if (FLAG_debug_code) {
+    __ Abort("Unreachable code: returned from tail call.");
+  }
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 6499ea0..98c5c6f 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -68,9 +68,9 @@
   // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
-  int map_index = strict_mode_ == kStrictMode
-      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
-      : Context::FUNCTION_MAP_INDEX;
+  int map_index = (language_mode_ == CLASSIC_MODE)
+      ? Context::FUNCTION_MAP_INDEX
+      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -155,6 +155,131 @@
 }
 
 
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + (1 * kPointerSize)]: function
+  // [rsp + (2 * kPointerSize)]: serialized scope info
+
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        rax, rbx, rcx, &gc, TAG_OBJECT);
+
+  // Get the function from the stack.
+  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+  // Get the serialized scope info from the stack.
+  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+  // Set up the object header.
+  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
+  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+  // If this block context is nested in the global context, we get a smi
+  // sentinel instead of a function. The block context should get the
+  // canonical empty function of the global context as its closure which
+  // we still have to look up.
+  Label after_sentinel;
+  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
+  if (FLAG_debug_code) {
+    const char* message = "Expected 0 as a Smi sentinel";
+    __ cmpq(rcx, Immediate(0));
+    __ Assert(equal, message);
+  }
+  __ movq(rcx, GlobalObjectOperand());
+  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
+  __ bind(&after_sentinel);
+
+  // Set up the fixed slots.
+  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
+  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
+  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
+
+  // Copy the global object from the previous context.
+  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
+
+  // Initialize the rest of the slots to the hole value.
+  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < slots_; i++) {
+    __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
+  }
+
+  // Return and remove the on-stack parameter.
+  __ movq(rsi, rax);
+  __ ret(2 * kPointerSize);
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
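The stub above allocates a FixedArray of slots_ + Context::MIN_CONTEXT_SLOTS entries, fills the fixed slots (closure, previous, extension, global), and initializes the rest to the hole. A minimal sketch of the resulting layout, assuming four fixed slots (the actual value of Context::MIN_CONTEXT_SLOTS lives in V8):

    #include <cstdint>
    #include <vector>

    // Illustrative model of the block-context object built by the stub.
    struct BlockContext {
      static const int kMinContextSlots = 4;  // assumed stand-in
      std::vector<intptr_t> slots;

      BlockContext(int scope_slots, intptr_t the_hole)
          : slots(kMinContextSlots + scope_slots, the_hole) {
        // slots[0..3] would hold closure, previous, extension and global;
        // the remaining scope_slots entries stay initialized to the hole.
      }
    };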
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
+  //
+  // rcx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = 0;
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
+  }
+  int size = JSArray::kSize + elements_size;
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      __ movq(rbx, FieldOperand(rcx, i));
+      __ movq(FieldOperand(rax, i), rbx);
+    }
+  }
+
+  if (length > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ lea(rdx, Operand(rax, JSArray::kSize));
+    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+    // Copy the elements array.
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
+      for (int i = 0; i < elements_size; i += kPointerSize) {
+        __ movq(rbx, FieldOperand(rcx, i));
+        __ movq(FieldOperand(rdx, i), rbx);
+      }
+    } else {
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
+      int i;
+      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
+        __ movq(rbx, FieldOperand(rcx, i));
+        __ movq(FieldOperand(rdx, i), rbx);
+      }
+      while (i < elements_size) {
+        __ movsd(xmm0, FieldOperand(rcx, i));
+        __ movsd(FieldOperand(rdx, i), xmm0);
+        i += kDoubleSize;
+      }
+      ASSERT(i == elements_size);
+    }
+  }
+}
+
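GenerateFastCloneShallowArrayCommon performs the clone as a single allocation: the JSArray header and its elements are carved out of one block, so only one new-space limit check is needed, and the clone's elements pointer is then aimed just past the header. A hedged C++ model of that copy strategy (the sizes and layout are assumptions, not V8's real object model):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Illustrative model: clone header and elements into one contiguous
    // block, then point the clone's elements at the block's tail.
    void CloneShallowArray(const uint8_t* boilerplate_header,
                           const uint8_t* boilerplate_elements,
                           std::size_t header_size, std::size_t elements_size,
                           uint8_t* out /* header_size + elements_size bytes */) {
      std::memcpy(out, boilerplate_header, header_size);  // JSArray part
      if (elements_size > 0) {
        uint8_t* out_elements = out + header_size;        // lea rdx, [rax+size]
        std::memcpy(out_elements, boilerplate_elements, elements_size);
        // The elements field inside `out` would then be repointed to
        // out_elements (the movq to JSArray::kElementsOffset above).
      }
    }

For double elements the stub copies the header word-by-word and the payload in kDoubleSize steps through xmm0, but the net effect is the same raw copy.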
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -162,29 +287,54 @@
   // [rsp + (2 * kPointerSize)]: literal index.
   // [rsp + (3 * kPointerSize)]: literals array.
 
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
-  int size = JSArray::kSize + elements_size;
-
   // Load boilerplate object into rcx and check if we need to create a
   // boilerplate.
-  Label slow_case;
   __ movq(rcx, Operand(rsp, 3 * kPointerSize));
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
   __ movq(rcx,
           FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
   __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  Label slow_case;
   __ j(equal, &slow_case);
 
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // rcx is boilerplate object.
+  Factory* factory = masm->isolate()->factory();
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_cow_array_map());
+    __ j(not_equal, &check_fast_elements);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_array_map());
+    __ j(not_equal, &double_elements);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -196,33 +346,7 @@
     __ pop(rcx);
   }
 
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
-      __ movq(rbx, FieldOperand(rcx, i));
-      __ movq(FieldOperand(rax, i), rbx);
-    }
-  }
-
-  if (length_ > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
-    __ lea(rdx, Operand(rax, JSArray::kSize));
-    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
-    // Copy the elements array.
-    for (int i = 0; i < elements_size; i += kPointerSize) {
-      __ movq(rbx, FieldOperand(rcx, i));
-      __ movq(FieldOperand(rdx, i), rbx);
-    }
-  }
-
-  // Return and remove the on-stack parameters.
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   __ ret(3 * kPointerSize);
 
   __ bind(&slow_case);
@@ -230,9 +354,54 @@
 }
 
 
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + kPointerSize]: object literal flags.
+  // [rsp + (2 * kPointerSize)]: constant properties.
+  // [rsp + (3 * kPointerSize)]: literal index.
+  // [rsp + (4 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into rcx and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+  __ movq(rax, Operand(rsp, 3 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ movq(rcx,
+          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &slow_case);
+
+  // Check that the boilerplate contains only fast properties and we can
+  // statically determine the instance size.
+  int size = JSObject::kHeaderSize + length_ * kPointerSize;
+  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
+  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
+  __ j(not_equal, &slow_case);
+
+  // Allocate the JS object and copy header together with all in-object
+  // properties from the boilerplate.
+  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+  for (int i = 0; i < size; i += kPointerSize) {
+    __ movq(rbx, FieldOperand(rcx, i));
+    __ movq(FieldOperand(rax, i), rbx);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ ret(4 * kPointerSize);
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
+
+
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   const Register argument = rax;
   const Register map = rdx;
@@ -328,6 +497,25 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  __ PushCallerSaved(save_doubles_);
+  const int argument_count = 1;
+  __ PrepareCallCFunction(argument_count);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  __ PopCallerSaved(save_doubles_);
+  __ ret(0);
+}
+
+
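The #ifdef above is the usual x64 ABI split: the Microsoft x64 convention passes the first integer argument in rcx, while the System V AMD64 ABI (Linux, BSD, macOS) uses rdi, so the isolate pointer has to be placed differently before the C call. A standalone illustration (not V8 code):

    #include <cstdio>

    // First integer-argument register by x64 ABI, mirroring the #ifdef above.
    const char* FirstArgRegister() {
    #ifdef _WIN64
      return "rcx";  // Microsoft x64 calling convention
    #else
      return "rdi";  // System V AMD64 ABI
    #endif
    }

    int main() {
      std::printf("isolate pointer goes in %s\n", FirstArgRegister());
      return 0;
    }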
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -622,12 +810,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ movq(rcx, rax);
-    __ pop(rax);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(rax);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ movq(rcx, rax);
+      __ pop(rax);
+    }
     __ bind(&heapnumber_allocated);
     // rcx: allocated 'empty' number
 
@@ -751,6 +940,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1414,6 +1607,8 @@
   __ cmpq(rbx, Operand(rcx, 0));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
   __ movq(rax, Operand(rcx, 2 * kIntSize));
   if (tagged) {
     __ fstp(0);  // Clear FPU stack.
@@ -1424,6 +1619,7 @@
   }
 
   __ bind(&cache_miss);
+  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
   // Update cache with new value.
   if (tagged) {
     __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
@@ -1453,11 +1649,12 @@
     __ addq(rsp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-    // Allocate an unused object bigger than a HeapNumber.
-    __ Push(Smi::FromInt(2 * kDoubleSize));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Allocate an unused object bigger than a HeapNumber.
+      __ Push(Smi::FromInt(2 * kDoubleSize));
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 
@@ -1473,10 +1670,11 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(rax, rdi, &skip_cache);
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(rax);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -1488,6 +1686,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -1503,7 +1702,9 @@
   // rcx: Pointer to cache entry. Must be preserved.
   // st(0): Input double
   Label done;
-  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+  if (type_ == TranscendentalCache::SIN ||
+      type_ == TranscendentalCache::COS ||
+      type_ == TranscendentalCache::TAN) {
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
@@ -1573,6 +1774,12 @@
       case TranscendentalCache::COS:
         __ fcos();
         break;
+      case TranscendentalCache::TAN:
+        // FPTAN replaces st(0) with its tangent and pushes 1.0 onto the
+        // FP register stack.
+        __ fptan();
+        __ fstp(0);  // Discard the pushed 1.0.
+        break;
       default:
         UNREACHABLE();
     }
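To spell out the FPTAN sequence above: the instruction replaces st(0) with its tangent and then pushes a constant 1.0, so the stub issues one fstp to discard the 1.0 and leave tan(x) on top. The equivalent scalar computation, as a trivial check in portable C++:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 0.5;
      // After fptan: st(1) = tan(x), st(0) = 1.0; the fstp(0) discards
      // the 1.0, so the net result on the FP stack is just tan(x).
      std::printf("tan(%g) = %g\n", x, std::tan(x));
      return 0;
    }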
@@ -2346,10 +2553,6 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
-  if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-    return;
-  }
 
   // Stack frame on entry.
   //  rsp[0]: return address
@@ -2455,26 +2658,40 @@
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
-  __ andb(rbx, Immediate(
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+  __ andb(rbx, Immediate(kIsNotStringMask |
+                         kStringRepresentationMask |
+                         kStringEncodingMask |
+                         kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ascii string.
-  __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+  // Any other flat string must be a flat ascii string.  None of the following
+  // string type tests will succeed if the subject is not a string or is a
+  // short external string.
+  __ andb(rbx, Immediate(kIsNotStringMask |
+                         kStringRepresentationMask |
+                         kShortExternalStringMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
+  // rbx: whether the subject is a string and, if so, its representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, check_encoding;
+  Label cons_string, external_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmpq(rbx, Immediate(kExternalStringTag));
   __ j(less, &cons_string, Label::kNear);
-  __ j(equal, &runtime);
+  __ j(equal, &external_string);
+
+  // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
+  __ j(not_zero, &runtime);
 
   // String is sliced.
   __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
@@ -2498,10 +2715,10 @@
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be ascii.
+  // Any other flat string must be sequential ascii or external.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask));
-  __ j(not_zero, &runtime);
+  __ j(not_zero, &external_string);
 
   __ bind(&seq_ascii_string);
   // rdi: subject string (sequential ascii)
@@ -2670,12 +2887,18 @@
   // Store last subject and last input.
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
-  __ movq(rcx, rbx);
-  __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+  __ RecordWriteField(rbx,
+                      RegExpImpl::kLastSubjectOffset,
+                      rax,
+                      rdi,
+                      kDontSaveFPRegs);
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
-  __ movq(rcx, rbx);
-  __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+  __ RecordWriteField(rbx,
+                      RegExpImpl::kLastInputOffset,
+                      rax,
+                      rdi,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   __ LoadAddress(rcx,
@@ -2729,6 +2952,27 @@
   __ bind(&termination_exception);
   __ ThrowUncatchable(TERMINATION, rax);
 
+  // External string.  Short external strings have already been ruled out.
+  // rdi: subject string (expected to be external)
+  // rbx: scratch
+  __ bind(&external_string);
+  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ testb(rbx, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ testb(rbx, Immediate(kStringEncodingMask));
+  __ j(not_zero, &seq_ascii_string);
+  __ jmp(&seq_two_byte_string);
+
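The external-string block above works because the stub only ever indexes character data through sequential-string offsets. For an external string the data lives off-heap behind the resource-data pointer, so rewinding that pointer by the sequential header size makes the same offset-based addressing land on the right bytes. A hedged model (the header size here is an assumed stand-in; the real constant is SeqTwoByteString::kHeaderSize adjusted for the heap-object tag):

    // Illustrative model: after adjustment, data + kHeaderSize + i addresses
    // character i, exactly as it would for a sequential string.
    const int kHeaderSize = 24;  // assumed stand-in

    const char* AdjustExternalData(const char* resource_data) {
      return resource_data - kHeaderSize;
    }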
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -3231,7 +3475,24 @@
 }
 
 
+void CallFunctionStub::FinishCode(Handle<Code> code) {
+  code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  // rdi : the function to call
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -3252,10 +3513,6 @@
     __ bind(&call);
   }
 
-  // Get the function to call from the stack.
-  // +2 ~ receiver, return address
-  __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
   // Check that the function really is a JavaScript function.
   __ JumpIfSmi(rdi, &non_function);
   // Goto slow case if we do not have a function.
@@ -3292,7 +3549,7 @@
   __ push(rcx);
   __ Set(rax, argc_ + 1);
   __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_FUNCTION);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
   {
     Handle<Code> adaptor =
@@ -3319,6 +3576,35 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+#ifdef _WIN64
+  return result_size_ == 1;
+#else
+  return true;
+#endif
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  // It is important that the store buffer overflow stubs are generated first.
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  save_doubles.GetCode()->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   // Throw exception in eax.
   __ Throw(rax);
@@ -3545,7 +3831,7 @@
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, exit;
+  Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
   {  // NOLINT. Scope block confuses linter.
     MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
@@ -3605,20 +3891,23 @@
   __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
 
-  // Call a faked try-block that does the invoke.
-  __ call(&invoke);
-
-  // Caught exception: Store result (exception) in the pending
-  // exception field in the JSEnv and return a failure sentinel.
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
   __ Store(pending_exception, rax);
   __ movq(rax, Failure::Exception(), RelocInfo::NONE);
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
 
   // Clear any pending exceptions.
   __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
@@ -3627,11 +3916,11 @@
   // Fake a receiver (NULL).
   __ push(Immediate(0));  // receiver
 
-  // Invoke the function by calling through JS entry trampoline
-  // builtin and pop the faked function when we return. We load the address
-  // from an external reference instead of inlining the call target address
-  // directly in the code, because the builtin stubs may not have been
-  // generated yet at the time this code is generated.
+  // Invoke the function by calling through JS entry trampoline builtin and
+  // pop the faked function when we return. We load the address from an
+  // external reference instead of inlining the call target address directly
+  // in the code, because the builtin stubs may not have been generated yet
+  // at the time this code is generated.
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate);
@@ -3740,7 +4029,7 @@
     __ bind(&miss);
   }
 
-  __ TryGetFunctionPrototype(rdx, rbx, &slow);
+  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(rbx, &slow);
@@ -3757,14 +4046,17 @@
     __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
   } else {
+    // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
-    __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
       __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
+    __ movq(kScratchRegister,
+            Operand(kScratchRegister, kOffsetToMapCheckValue));
+    __ movq(Operand(kScratchRegister, 0), rax);
   }
 
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
@@ -3791,9 +4083,11 @@
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of true in the root array at the inline check site.
-    ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
-        == 0xB0 - 0x100);
-    __ movl(rax, Immediate(0xB0));  // TrueValue is at -10 * kPointerSize.
+    int true_offset = 0x100 +
+        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+    // Assert that it fits in one byte.
+    ASSERT(true_offset >= 0 && true_offset < 0x100);
+    __ movl(rax, Immediate(true_offset));
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
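The offsets asserted above must fit in one byte because the patched instruction at the call site is a movb. Working the arithmetic through with assumed values (V8 defines the real constants; the bias of 128 and root index of 6 are chosen here to reproduce the 0xB0 from the deleted hard-coded version):

    #include <cassert>

    int main() {
      const int kTrueValueRootIndex = 6;   // assumed example value
      const int kPointerSizeLog2 = 3;      // 8-byte pointers
      const int kRootRegisterBias = 128;   // assumed example value
      int true_offset = 0x100 +
          (kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
      assert(true_offset >= 0 && true_offset < 0x100);  // one byte
      assert(true_offset == 0xB0);  // matches the old literal
      return 0;
    }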
@@ -3812,9 +4106,11 @@
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of false in the root array at the inline check site.
-    ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
-        == 0xB8 - 0x100);
-    __ movl(rax, Immediate(0xB8));  // FalseValue is at -9 * kPointerSize.
+    int false_offset = 0x100 +
+        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+    // Assert it is a 1-byte signed value.
+    ASSERT(false_offset >= 0 && false_offset < 0x100);
+    __ movl(rax, Immediate(false_offset));
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -3904,85 +4200,25 @@
 
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
-
-  // Put smi-tagged index into scratch register.
-  __ movq(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
-  __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result_, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  __ SmiToInteger32(index_, index_);
 
-  // Handle non-flat strings.
-  __ and_(result_, Immediate(kStringRepresentationMask));
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmpb(result_, Immediate(kExternalStringTag));
-  __ j(greater, &sliced_string);
-  __ j(equal, &call_runtime_);
+  StringCharLoadGenerator::Generate(
+      masm, object_, index_, result_, &call_runtime_);
 
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
-                 Heap::kEmptyStringRootIndex);
-  __ j(not_equal, &call_runtime_);
-  // Get the first of the two strings and load its instance type.
-  __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string, Label::kNear);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
-  __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
-  __ bind(&assure_seq_string);
-  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
-  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result_, Immediate(kStringRepresentationMask));
-  __ j(not_zero, &call_runtime_);
-  __ jmp(&flat_string);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ testb(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  __ SmiToInteger32(scratch_, scratch_);
-  __ movzxwl(result_, FieldOperand(object_,
-                                   scratch_, times_2,
-                                   SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ SmiToInteger32(scratch_, scratch_);
-  __ movzxbl(result_, FieldOperand(object_,
-                                   scratch_, times_1,
-                                   SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
   __ Integer32ToSmi(result_, result_);
   __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   Factory* factory = masm->isolate()->factory();
@@ -3995,7 +4231,6 @@
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
-  __ push(index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4004,19 +4239,18 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!scratch_.is(rax)) {
+  if (!index_.is(rax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
-    __ movq(scratch_, rax);
+    __ movq(index_, rax);
   }
-  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  __ JumpIfNotSmi(index_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4026,6 +4260,7 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ Integer32ToSmi(index_, index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(rax)) {
@@ -4058,7 +4293,8 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -4085,7 +4321,8 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -4566,7 +4803,12 @@
 
     __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
     __ j(equal, not_found);
-    // Must be null (deleted entry).
+    // Must be the hole (deleted entry).
+    if (FLAG_debug_code) {
+      __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+      __ cmpq(kScratchRegister, candidate);
+      __ Assert(equal, "oddball in symbol table is not undefined or the hole");
+    }
     __ jmp(&next_probe[i]);
 
     __ bind(&is_string);
@@ -4609,13 +4851,10 @@
                                     Register hash,
                                     Register character,
                                     Register scratch) {
-  // hash = (seed + character) + ((seed + character) << 10);
-  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
-  __ SmiToInteger32(scratch, scratch);
-  __ addl(scratch, character);
-  __ movl(hash, scratch);
-  __ shll(scratch, Immediate(10));
-  __ addl(hash, scratch);
+  // hash = character + (character << 10);
+  __ movl(hash, character);
+  __ shll(hash, Immediate(10));
+  __ addl(hash, character);
   // hash ^= hash >> 6;
   __ movl(scratch, hash);
   __ shrl(scratch, Immediate(6));
@@ -4654,12 +4893,13 @@
   __ shll(scratch, Immediate(15));
   __ addl(hash, scratch);
 
-  __ andl(hash, Immediate(String::kHashBitMask));
+  uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
+  __ andl(hash, Immediate(kHashShiftCutOffMask));
 
   // if (hash == 0) hash = 27;
   Label hash_not_zero;
   __ j(not_zero, &hash_not_zero);
-  __ Set(hash, StringHasher::kZeroHash);
+  __ Set(hash, 27);
   __ bind(&hash_not_zero);
 }
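The two hunks above drop the hash seed: initialization becomes hash = c + (c << 10); hash ^= hash >> 6, the finalization masks off everything above 32 - String::kHashShift bits, and a zero hash is remapped to 27 so zero stays reserved. A minimal sketch of both steps in plain C++ (the intermediate <<3, >>11, <<15 avalanche steps follow the surrounding code; kHashShift's value is V8's):

    #include <cstdint>

    // Mirrors the unseeded hash init above.
    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character + (character << 10);
      hash ^= hash >> 6;
      return hash;
    }

    // Mirrors the finalization, masking to 32 - kHashShift bits.
    uint32_t HashFinish(uint32_t hash, int hash_shift) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= (1u << (32 - hash_shift)) - 1;  // kHashShiftCutOffMask
      return hash == 0 ? 27 : hash;           // zero hash is reserved
    }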
 
@@ -4743,18 +4983,15 @@
     // rbx: instance type
     // rcx: sub string length
     // rdx: from index (smi)
-    Label allocate_slice, sliced_string, seq_string;
+    Label allocate_slice, sliced_string, seq_or_external_string;
     __ cmpq(rcx, Immediate(SlicedString::kMinLength));
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ testb(rbx, Immediate(kStringRepresentationMask));
-    __ j(zero, &seq_string, Label::kNear);
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ testb(rbx, Immediate(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ j(zero, &runtime);
+    __ j(zero, &seq_or_external_string, Label::kNear);
 
     __ testb(rbx, Immediate(kSlicedNotConsMask));
     __ j(not_zero, &sliced_string, Label::kNear);
@@ -4771,8 +5008,8 @@
     __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
     __ jmp(&allocate_slice, Label::kNear);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string.  Move it to the correct register.
     __ movq(rdi, rax);
 
     __ bind(&allocate_slice);
@@ -5276,12 +5513,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss =
       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ push(rdx);
-  __ push(rax);
-  __ Push(Smi::FromInt(op_));
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rdx);
+    __ push(rax);
+    __ Push(Smi::FromInt(op_));
+    __ CallExternalReference(miss, 3);
+  }
 
   // Compute the entry point of the rewritten stub.
   __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -5297,13 +5535,12 @@
 }
 
 
-MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register properties,
-    String* name,
-    Register r0) {
+void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register properties,
+                                                        Handle<String> name,
+                                                        Register r0) {
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
@@ -5350,12 +5587,10 @@
                                   StringDictionaryLookupStub::NEGATIVE_LOOKUP);
   __ Push(Handle<Object>(name));
   __ push(Immediate(name->Hash()));
-  MaybeObject* result = masm->TryCallStub(&stub);
-  if (result->IsFailure()) return result;
+  __ CallStub(&stub);
   __ testq(r0, r0);
   __ j(not_zero, miss);
   __ jmp(done);
-  return result;
 }
 
 
@@ -5370,6 +5605,11 @@
                                                         Register name,
                                                         Register r0,
                                                         Register r1) {
+  ASSERT(!elements.is(r0));
+  ASSERT(!elements.is(r1));
+  ASSERT(!name.is(r0));
+  ASSERT(!name.is(r1));
+
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -5412,6 +5652,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -5497,6 +5739,364 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { rbx, rax, rdi, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { rbx, rcx, rdx, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField and
+  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { rdx, rcx, rbx, EMIT_REMEMBERED_SET },
+  // GenerateStoreField calls the stub with two different permutations of
+  // registers.  This is the second.
+  { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+  // StoreIC::GenerateNormal via GenerateDictionaryStore.
+  { rbx, r8, r9, EMIT_REMEMBERED_SET },
+  // KeyedStoreIC::GenerateGeneric.
+  { rbx, rdx, rcx, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { rdi, rdx, rcx, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { rdx, rbx, rdi, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
+  // and ElementsTransitionGenerator::GenerateDoubleToObject
+  { rdx, r11, r15, EMIT_REMEMBERED_SET },
+  // ElementsTransitionGenerator::GenerateDoubleToObject
+  { r11, rax, r15, EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { rbx, rax, rcx, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET }
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in three registers: address_, value_ and object_.  A
+// pointer to the value has just been written into the object; now this stub
+// makes sure we keep the GC informed.  The word in the object where the value
+// has been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch them back
+  // and forth between compare instructions (nops in this position) and the
+  // real branches when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+  __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  masm->set_byte_at(0, kTwoByteNopInstruction);
+  masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
+
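The patchable prologue above is a seven-byte window: a two-byte slot followed by a five-byte slot, each holding either a harmless compare (acting as a nop) or a jump. GetMode in the header decodes that state; a self-contained model of the decoding:

    #include <cassert>
    #include <cstdint>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    const uint8_t kTwoByteNop   = 0x3c;  // cmpb al, imm8 (skippable)
    const uint8_t kTwoByteJump  = 0xeb;  // jmp imm8
    const uint8_t kFiveByteNop  = 0x3d;  // cmpl eax, imm32 (skippable)
    const uint8_t kFiveByteJump = 0xe9;  // jmp imm32

    // Mirrors RecordWriteStub::GetMode over the stub's first seven bytes.
    Mode GetMode(const uint8_t* code) {
      if (code[0] == kTwoByteJump) return INCREMENTAL;
      assert(code[0] == kTwoByteNop);
      if (code[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
      assert(code[2] == kFiveByteNop);
      return STORE_BUFFER_ONLY;
    }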
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     not_zero,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+#ifdef _WIN64
+  Register arg3 = r8;
+  Register arg2 = rdx;
+  Register arg1 = rcx;
+#else
+  Register arg3 = rdx;
+  Register arg2 = rsi;
+  Register arg1 = rdi;
+#endif
+  Register address =
+      arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(arg1));
+  __ Move(address, regs_.address());
+  __ Move(arg1, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    // TODO(gc): Can we just put the address into arg2 from the beginning?
+    __ Move(arg2, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ movq(arg2, Operand(address, 0));
+  }
+  __ LoadAddress(arg3, ExternalReference::isolate_address());
+  int argument_count = 3;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_object;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(),
+                 regs_.scratch0(),
+                 regs_.scratch1(),
+                 &on_black,
+                 Label::kNear);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     zero,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need an extra register for this, so we push the object register
+  // temporarily.
+  __ push(regs_.object());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    &need_incremental_pop_object,
+                    Label::kNear);
+  __ pop(regs_.object());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&need_incremental_pop_object);
+  __ pop(regs_.object());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
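The color checks above are standard tri-color marking: only a store into an already-scanned (black) object can hide a pointer from the marker, and the fix is to grey the stored value so it gets scanned. A minimal model of that invariant, not V8's API:

    enum Color { WHITE, GREY, BLACK };

    struct Obj { Color color; };

    // Mirrors the JumpIfBlack fast path: non-black objects need nothing.
    bool NeedsToInformIncrementalMarker(const Obj& object) {
      return object.color == BLACK;
    }

    // Mirrors EnsureNotWhite: re-queue a white value for scanning.
    void EnsureNotWhite(Obj* value) {
      if (value->color == WHITE) value->color = GREY;
    }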
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : element value to store
+  //  -- rbx    : array literal
+  //  -- rdi    : map of array literal
+  //  -- rcx    : element index as smi
+  //  -- rdx    : array literal index in function
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(rdi, &double_elements);
+
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(rax, &smi_element);
+  __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call
+  // into the runtime.
+
+  __ bind(&slow_elements);
+  __ pop(rdi);  // Pop the return address; it is pushed back below so the
+                // tail call returns to the right place.
+  __ push(rbx);
+  __ push(rcx);
+  __ push(rax);
+  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+  __ push(rdx);
+  __ push(rdi);  // Push the return address back so that the tail call
+                 // returns to the right place.
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ SmiToInteger32(kScratchRegister, rcx);
+  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+                           FixedArrayBase::kHeaderSize));
+  __ movq(Operand(rcx, 0), rax);
+  // Update the write barrier for the array store.
+  __ RecordWrite(rbx, rcx, rax,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ SmiToInteger32(kScratchRegister, rcx);
+  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+                       FixedArrayBase::kHeaderSize), rax);
+  __ ret(0);
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+
+  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+  __ SmiToInteger32(r11, rcx);
+  __ StoreNumberToDoubleElements(rax,
+                                 r9,
+                                 r11,
+                                 xmm0,
+                                 &slow_elements);
+  __ ret(0);
+}
+
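StoreArrayLiteralElementStub boils down to a dispatch on the literal's elements kind: double arrays store an unboxed double, object arrays take a tagged store plus write barrier, smi-only arrays take a tagged store without one, and any store that would change the kind falls through to the runtime. A hedged C++ model of that dispatch (the smi tagging test and the surrounding types are illustrative, not V8's):

    #include <cstdint>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS
    };

    bool IsSmi(intptr_t v) { return (v & 1) == 0; }  // assumed tagging scheme

    // Returns false when the store needs the runtime (kind transition).
    bool StoreArrayLiteralElement(ElementsKind kind, intptr_t value,
                                  intptr_t* elements, double* doubles, int i) {
      if (kind == FAST_DOUBLE_ELEMENTS) {
        if (!IsSmi(value)) return false;               // bail to runtime
        doubles[i] = static_cast<double>(value >> 1);  // untag and store
        return true;
      }
      if (IsSmi(value)) {                  // smi_element: plain store
        elements[i] = value;
        return true;
      }
      if (kind == FAST_SMI_ONLY_ELEMENTS) return false;  // needs transition
      elements[i] = value;                 // fast_elements: store, followed
      return true;                         // by a write barrier in the stub
    }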
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 4058118..30ef3e8 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -59,6 +59,32 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+// Flags that indicate how to generate code for the GenericBinaryOpStub.
+enum GenericBinaryFlags {
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -124,7 +150,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -210,7 +236,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Code* code) {
+  virtual void FinishCode(Handle<Code> code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -397,13 +423,12 @@
 
   void Generate(MacroAssembler* masm);
 
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register properties,
-      String* name,
-      Register r0);
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register properties,
+                                     Handle<String> name,
+                                     Register r0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -413,6 +438,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -425,7 +452,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -446,6 +473,246 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
+
+ private:
+  // This is a helper class for freeing up three scratch registers, of which
+  // the third is always rcx (needed for shift operations).  The input is two
+  // registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
+      if (scratch0.is(rcx)) {
+        scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(rcx)) {
+        object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(rcx)) {
+        address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register.  But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->push(rcx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->movq(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->movq(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to move
+      // them back.  Only in one case is the orig_ reg different from the plain
+      // one, since only one of them can alias with rcx.
+      if (!object_.is(object_orig_)) {
+        masm->movq(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->movq(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->pop(rcx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.
+
+    // The three scratch registers (incl. rcx) will be restored by other means
+    // so we don't bother pushing them here.  Rbx, rbp, and r12-r15 are
+    // callee-saved and don't need to be preserved.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always rcx.
+
+    Register GetRegThatIsNotRcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(rcx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
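
The register shuffling above boils down to picking, from the allocatable set, a register that is neither rcx nor any of three excluded registers. A simplified sketch with plain integer register codes (illustrative, not the real Register type):

    // Return the first allocatable code that avoids rcx and r1..r3.
    const int kRcxCode = 1;  // assumed x64 encoding of rcx

    int PickScratch(const int* allocatable, int count,
                    int r1, int r2, int r3) {
      for (int i = 0; i < count; i++) {
        int c = allocatable[i];
        if (c == kRcxCode || c == r1 || c == r2 || c == r3) continue;
        return c;
      }
      return -1;  // unreachable if the allocatable set has >= 5 entries
    }
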
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
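
For reference, the MinorKey above packs three 4-bit register codes and two 1-bit flags into a single integer. A hand-rolled equivalent of what the BitField::encode calls produce (a sketch; the real BitField also handles decoding and range checks):

    // Bits 0-3: object, 4-7: value, 8-11: address,
    // bit 12: remembered-set action, bit 13: save-FP mode.
    int EncodeMinorKey(int object_code, int value_code, int address_code,
                       int remembered_set_action, int save_fp_mode) {
      return object_code |
             (value_code << 4) |
             (address_code << 8) |
             (remembered_set_action << 12) |
             (save_fp_mode << 13);
    }
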
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 507bbd4..f7e8fc1 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -30,6 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "codegen.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -38,12 +39,16 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
@@ -139,6 +144,331 @@
 
 #endif
 
+#undef __
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rbx    : target map
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rbx    : target map
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  // The fail label is only reached if the COW case below has to allocate.
+  Label allocated, cow_array;
+
+  // Check backing store for COW-ness.  In the negative case, we do not have to
+  // allocate a new array, since FixedArray and FixedDoubleArray do not differ
+  // in size.
+  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
+                 Heap::kFixedCOWArrayMapRootIndex);
+  __ j(equal, &cow_array);
+  __ movq(r14, r8);  // Destination array equals source array.
+
+  __ bind(&allocated);
+  // r8 : source FixedArray
+  // r9 : elements array length
+  // r14: destination FixedDoubleArray
+  // Set backing store's map
+  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
+
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Convert smis to doubles and holes to hole NaNs.  The Array's length
+  // remains unchanged.
+  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
+  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
+  Label loop, entry, convert_hole;
+  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+  // r15: the-hole NaN
+  __ jmp(&entry);
+
+  // Allocate new array if the source array is a COW array.
+  __ bind(&cow_array);
+  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+  __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
+  // Set receiver's backing store.
+  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
+  __ movq(r11, r14);
+  __ RecordWriteField(rdx,
+                      JSObject::kElementsOffset,
+                      r11,
+                      r15,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Set backing store's length.
+  __ Integer32ToSmi(r11, r9);
+  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
+  __ jmp(&allocated);
+
+  // Conversion loop.
+  __ bind(&loop);
+  __ movq(rbx,
+          FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
+  // r9 : current element's index
+  // rbx: current element (smi-tagged)
+  __ JumpIfNotSmi(rbx, &convert_hole);
+  __ SmiToInteger32(rbx, rbx);
+  __ cvtlsi2sd(xmm0, rbx);
+  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+           xmm0);
+  __ jmp(&entry);
+  __ bind(&convert_hole);
+
+  if (FLAG_debug_code) {
+    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+    __ Assert(equal, "object found in smi-only array");
+  }
+
+  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
+  __ bind(&entry);
+  __ decq(r9);
+  __ j(not_sign, &loop);
+}
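
The element conversion above has a simple scalar counterpart: untag the smi and widen it to a double, or emit a reserved NaN bit pattern for the hole so it stays distinguishable from real doubles. A sketch (the hole constant here is illustrative, not the exact kHoleNanInt64 value):

    #include <stdint.h>
    #include <string.h>

    const uint64_t kHoleBits = 0x7ff7fffffffffffful;  // illustrative

    uint64_t ConvertElement(int64_t tagged, bool is_hole) {
      if (is_hole) return kHoleBits;
      double d = static_cast<double>(tagged >> 32);  // untag an x64 smi
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));               // reinterpret as raw bits
      return bits;
    }
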
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Label* fail) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rbx    : target map
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label loop, entry, convert_hole, gc_required;
+  __ push(rax);
+
+  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+  // r8 : source FixedDoubleArray
+  // r9 : number of elements
+  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+  __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+  // r11: destination FixedArray
+  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
+  __ Integer32ToSmi(r14, r9);
+  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
+
+  // Prepare for conversion loop.
+  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
+  // rsi: the-hole NaN
+  // rdi: pointer to the-hole
+  __ jmp(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ pop(rax);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ jmp(fail);
+
+  // Box doubles into heap numbers.
+  __ bind(&loop);
+  __ movq(r14, FieldOperand(r8,
+                            r9,
+                            times_pointer_size,
+                            FixedDoubleArray::kHeaderSize));
+  // r9 : current element's index
+  // r14: current element
+  __ cmpq(r14, rsi);
+  __ j(equal, &convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  __ AllocateHeapNumber(rax, r15, &gc_required);
+  // rax: new heap number
+  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+  __ movq(FieldOperand(r11,
+                       r9,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          rax);
+  __ movq(r15, r9);
+  __ RecordWriteArray(r11,
+                      rax,
+                      r15,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&entry, Label::kNear);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ movq(FieldOperand(r11,
+                       r9,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          rdi);
+
+  __ bind(&entry);
+  __ decq(r9);
+  __ j(not_sign, &loop);
+
+  // Set transitioned map.
+  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+  __ RecordWriteField(rdx,
+                      HeapObject::kMapOffset,
+                      rbx,
+                      rdi,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
+  __ RecordWriteField(rdx,
+                      JSObject::kElementsOffset,
+                      r11,
+                      r15,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(rax);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
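
Conceptually, the boxing loop above walks the array backwards (decq/j(not_sign) terminates once the index underflows) and replaces every non-hole double with a fresh heap number. A hedged scalar sketch, with allocation abstracted into a callback:

    #include <stdint.h>

    // src holds raw double bits; dst receives boxed values or the-hole.
    void DoubleToObject(const uint64_t* src, void** dst, int length,
                        uint64_t hole_bits, void* the_hole,
                        void* (*box_double)(uint64_t)) {
      for (int i = length - 1; i >= 0; i--) {
        dst[i] = (src[i] == hole_bits) ? the_hole : box_double(src[i]);
      }
    }
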
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ testb(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ testb(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ addq(index, result);
+  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+                 Heap::kEmptyStringRootIndex);
+  __ j(not_equal, call_runtime);
+  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label seq_string;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(result, Immediate(kStringRepresentationMask));
+  __ j(zero, &seq_string, Label::kNear);
+
+  // Handle external strings.
+  Label ascii_external, done;
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ testb(result, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  // Rule out short external strings.
+  STATIC_CHECK(kShortExternalStringTag != 0);
+  __ testb(result, Immediate(kShortExternalStringTag));
+  __ j(not_zero, call_runtime);
+  // Check encoding.
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ testb(result, Immediate(kStringEncodingMask));
+  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+  __ j(not_equal, &ascii_external, Label::kNear);
+  // Two-byte string.
+  __ movzxwl(result, Operand(result, index, times_2, 0));
+  __ jmp(&done, Label::kNear);
+  __ bind(&ascii_external);
+  // ASCII string.
+  __ movzxbl(result, Operand(result, index, times_1, 0));
+  __ jmp(&done, Label::kNear);
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii;
+  __ bind(&seq_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testb(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movzxwl(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii);
+  __ movzxbl(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
+}
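
The dispatch above can be summarized as a pure classification over the instance-type bits. A sketch with the masks passed in explicitly (the real constants, kIsIndirectStringMask and friends, live in objects.h):

    enum StringKind { SEQ_ASCII, SEQ_TWO_BYTE, EXT_ASCII, EXT_TWO_BYTE,
                      SLICE, CONS };

    StringKind Classify(int type, int indirect_mask, int sliced_mask,
                        int representation_mask, int encoding_mask) {
      if (type & indirect_mask)
        return (type & sliced_mask) ? SLICE : CONS;
      bool external = (type & representation_mask) != 0;  // kSeqStringTag == 0
      bool ascii = (type & encoding_mask) != 0;           // two-byte tag is 0
      if (external) return ascii ? EXT_ASCII : EXT_TWO_BYTE;
      return ascii ? SEQ_ASCII : SEQ_TWO_BYTE;
    }
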
 
 #undef __
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index a0648ce..2e80751 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -69,6 +69,21 @@
 };
 
 
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
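
A hedged call-site sketch for the generator declared above (register choices are illustrative; assumes a MacroAssembler* masm in scope):

    Label call_runtime;
    StringCharLoadGenerator::Generate(masm,
                                      rdx,   // string
                                      rcx,   // untagged index
                                      rax,   // untagged character out
                                      &call_runtime);
    // ... use rax; bind call_runtime to a slow path that flattens the string.
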
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 423e6f2..339b961 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -100,65 +100,66 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as as two smis causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    ASSERT(!reg.is(kScratchRegister));
-    if ((object_regs & (1 << r)) != 0) {
-      __ push(reg);
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object
+    // values are stored as two smis, causing them to be untouched by GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      ASSERT(!reg.is(kScratchRegister));
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
+      }
+      // Store the 64-bit value as two smis.
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ movq(kScratchRegister, reg);
+        __ Integer32ToSmi(reg, reg);
+        __ push(reg);
+        __ sar(kScratchRegister, Immediate(32));
+        __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+        __ push(kScratchRegister);
+      }
     }
-    // Store the 64-bit value as two smis.
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ movq(kScratchRegister, reg);
-      __ Integer32ToSmi(reg, reg);
-      __ push(reg);
-      __ sar(kScratchRegister, Immediate(32));
-      __ Integer32ToSmi(kScratchRegister, kScratchRegister);
-      __ push(kScratchRegister);
-    }
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ Set(rax, 0);  // No arguments (argc == 0).
-  __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+    __ Set(rax, 0);  // No arguments (argc == 0).
+    __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if (FLAG_debug_code) {
-      __ Set(reg, kDebugZapValue);
+    // Restore the register values from the expression stack.
+    for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if (FLAG_debug_code) {
+        __ Set(reg, kDebugZapValue);
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      // Reconstruct the 64-bit value from two smis.
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ pop(kScratchRegister);
+        __ SmiToInteger32(kScratchRegister, kScratchRegister);
+        __ shl(kScratchRegister, Immediate(32));
+        __ pop(reg);
+        __ SmiToInteger32(reg, reg);
+        __ or_(reg, kScratchRegister);
+      }
     }
-    if ((object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-    }
-    // Reconstruct the 64-bit value from two smis.
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ pop(kScratchRegister);
-      __ SmiToInteger32(kScratchRegister, kScratchRegister);
-      __ shl(kScratchRegister, Immediate(32));
-      __ pop(reg);
-      __ SmiToInteger32(reg, reg);
-      __ or_(reg, kScratchRegister);
-    }
+
+    // Get rid of the internal frame.
   }
 
-  // Get rid of the internal frame.
-  __ LeaveInternalFrame();
-
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
@@ -249,12 +250,12 @@
 }
 
 
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
   // Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- rdi : function
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, 0, false);
+  Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
 }
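
The debug-break helper above keeps raw 64-bit register values alive across a GC by splitting each into two smi-tagged halves (x64 smis carry their payload in the upper 32 bits). A scalar sketch of the round trip:

    #include <stdint.h>

    void SplitToSmis(int64_t value, int64_t* low_smi, int64_t* high_smi) {
      *low_smi = static_cast<int64_t>(static_cast<int32_t>(value)) << 32;
      *high_smi = (value >> 32) << 32;   // Integer32ToSmi on the high half
    }

    int64_t JoinFromSmis(int64_t low_smi, int64_t high_smi) {
      uint32_t low = static_cast<uint32_t>(low_smi >> 32);  // SmiToInteger32
      int64_t high = high_smi >> 32;
      return (high << 32) | low;         // shl + or_, as in the restore loop
    }
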
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index f322312..1fd78fc 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -87,13 +87,19 @@
 #endif
   }
 
+  Isolate* isolate = code->GetIsolate();
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+  DeoptimizerData* data = isolate->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -105,7 +111,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -135,10 +142,14 @@
   *(call_target_address - 2) = 0x90;  // nop
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+                                         Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -153,6 +164,9 @@
   *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, check_code);
 }
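
Patch and revert above are mirror images at the byte level: one flips the guard byte in front of the call site and retargets the call at the replacement code, the other restores it and points back at the stack-check stub. An illustrative sketch (opcodes and offsets are not authoritative):

    void SetStackCheckPatched(unsigned char* call_target_address, bool patched,
                              void (*set_target)(unsigned char*, void*),
                              void* check_entry, void* replacement_entry) {
      call_target_address[-2] = patched ? 0x90   // nop: jump disabled
                                        : 0x07;  // original short-jump offset
      set_target(call_target_address,
                 patched ? replacement_entry : check_entry);
    }
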
 
 
@@ -598,7 +612,10 @@
 
   Isolate* isolate = masm()->isolate();
 
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -644,8 +661,11 @@
   __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
   __ LoadAddress(arg2, ExternalReference::isolate_address());
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 2);
+  }
   __ pop(rax);
 
   // Replace the current frame with the output frames.
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 7012c76..2626954 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -31,32 +31,32 @@
 namespace v8 {
 namespace internal {
 
-static const int kNumRegs = 16;
-static const RegList kJSCallerSaved =
+const int kNumRegs = 16;
+const RegList kJSCallerSaved =
     1 << 0 |  // rax
     1 << 1 |  // rcx
     1 << 2 |  // rdx
     1 << 3 |  // rbx - used as a caller-saved register in JavaScript code
     1 << 7;   // rdi - callee function
 
-static const int kNumJSCallerSaved = 5;
+const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 // Number of registers for which space is reserved in safepoints.
-static const int kNumSafepointRegisters = 16;
+const int kNumSafepointRegisters = 16;
 
 // ----------------------------------------------------
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset    = 0 * kPointerSize;
-  static const int kContextOffset = 1 * kPointerSize;
-  static const int kFPOffset      = 2 * kPointerSize;
-  static const int kStateOffset   = 3 * kPointerSize;
-  static const int kPCOffset      = 4 * kPointerSize;
+  static const int kNextOffset     = 0 * kPointerSize;
+  static const int kCodeOffset     = 1 * kPointerSize;
+  static const int kStateOffset    = 2 * kPointerSize;
+  static const int kContextOffset  = 3 * kPointerSize;
+  static const int kFPOffset       = 4 * kPointerSize;
 
-  static const int kSize = kPCOffset + kPointerSize;
+  static const int kSize = kFPOffset + kPointerSize;
 };
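
Spelled out, the new handler layout above occupies five pointer-sized slots (8 bytes each on x64); this is a reading aid, not part of the patch:

    //   +0   next handler    (kNextOffset)
    //   +8   code object     (kCodeOffset)
    //   +16  state           (kStateOffset)
    //   +24  context         (kContextOffset)
    //   +32  frame pointer   (kFPOffset)
    // kSize = kFPOffset + kPointerSize = 32 + 8 = 40 bytes per handler.
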
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 556523f..963912f 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -44,11 +44,6 @@
 #define __ ACCESS_MASM(masm_)
 
 
-static unsigned GetPropertyId(Property* property) {
-  return property->id();
-}
-
-
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -122,6 +117,8 @@
   ASSERT(info_ == NULL);
   info_ = info;
   scope_ = info->scope();
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -136,7 +133,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). rcx is zero for method calls and non-zero for
   // function calls.
-  if (info->is_strict_mode() || info->is_native()) {
+  if (!info->is_classic_mode() || info->is_native()) {
     Label ok;
     __ testq(rcx, rcx);
     __ j(zero, &ok, Label::kNear);
@@ -147,6 +144,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
@@ -195,11 +197,9 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering rsi.
-        __ movq(rcx, rsi);
-        __ RecordWrite(rcx, context_offset, rax, rbx);
+        // Update the write barrier.  This clobbers rax and rbx.
+        __ RecordWriteContextSlot(
+            rsi, context_offset, rax, rbx, kDontSaveFPRegs);
       }
     }
   }
@@ -227,8 +227,8 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub stub(
-        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
-                         : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
+        is_classic_mode() ? ArgumentsAccessStub::NEW_NON_STRICT_SLOW
+                          : ArgumentsAccessStub::NEW_STRICT);
     __ CallStub(&stub);
 
     SetVar(arguments, rax, rbx, rdx);
@@ -251,7 +251,10 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         int ignored = 0;
-        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+        VariableProxy* proxy = scope()->function();
+        ASSERT(proxy->var()->mode() == CONST ||
+               proxy->var()->mode() == CONST_HARMONY);
+        EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -377,7 +380,7 @@
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -399,7 +402,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -432,7 +435,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -491,7 +494,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -555,7 +558,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
                                           true,
                                           true_label_,
                                           false_label_);
@@ -638,15 +641,16 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ movq(location, src);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
-    __ RecordWrite(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -657,13 +661,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-
-  ForwardBailoutStack* current = forward_bailout_stack_;
-  while (current != NULL) {
-    PrepareForBailout(current->expr(), state);
-    current = current->parent();
-  }
-
+  PrepareForBailout(expr, TOS_REG);
   if (should_normalize) {
     __ CompareRoot(rax, Heap::kTrueValueRootIndex);
     Split(equal, if_true, if_false, NULL);
@@ -673,13 +671,15 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        Variable::Mode mode,
+                                        VariableMode mode,
                                         FunctionLiteral* function,
                                         int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
+  bool binding_needs_init = (function == NULL) &&
+      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
       ++(*global_count);
@@ -691,7 +691,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ movq(StackOperand(variable), result_register());
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(StackOperand(variable), kScratchRegister);
@@ -715,10 +715,16 @@
         VisitForAccumulatorValue(function);
         __ movq(ContextOperand(rsi, variable->index()), result_register());
         int offset = Context::SlotOffset(variable->index());
-        __ movq(rbx, rsi);
-        __ RecordWrite(rbx, offset, result_register(), rcx);
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(rsi,
+                                  offset,
+                                  result_register(),
+                                  rcx,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -731,11 +737,13 @@
       Comment cmnt(masm_, "[ Declaration");
       __ push(rsi);
       __ Push(variable->name());
-      // Declaration nodes are always introduced in one of three modes.
-      ASSERT(mode == Variable::VAR ||
-             mode == Variable::CONST ||
-             mode == Variable::LET);
-      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR ||
+             mode == CONST ||
+             mode == CONST_HARMONY ||
+             mode == LET);
+      PropertyAttributes attr =
+          (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
       __ Push(Smi::FromInt(attr));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -743,7 +751,7 @@
       // must not destroy the current value.
       if (function != NULL) {
         VisitForStackValue(function);
-      } else if (mode == Variable::CONST || mode == Variable::LET) {
+      } else if (binding_needs_init) {
         __ PushRoot(Heap::kTheHoleValueRootIndex);
       } else {
         __ Push(Smi::FromInt(0));  // Indicates no initial value.
@@ -882,11 +890,17 @@
   __ bind(&done_convert);
   __ push(rax);
 
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+  __ j(below_equal, &call_runtime);
+
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next, call_runtime;
+  Label next;
   Register empty_fixed_array_value = r8;
   __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   Register empty_descriptor_array_value = r9;
@@ -962,9 +976,17 @@
   __ jmp(&loop);
 
   // We got a fixed array in register rax. Iterate through that.
+  Label non_proxy;
   __ bind(&fixed_array);
-  __ Push(Smi::FromInt(0));  // Map (0) - force slow check.
-  __ push(rax);
+  __ Move(rbx, Smi::FromInt(1));  // Smi indicates slow check
+  __ movq(rcx, Operand(rsp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
+  __ j(above, &non_proxy);
+  __ Move(rbx, Smi::FromInt(0));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ push(rbx);  // Smi
+  __ push(rax);  // Array
   __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ push(rax);  // Fixed array length (as smi).
   __ Push(Smi::FromInt(0));  // Initial index.
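
After the pushes above, the for-in loop body finds this stack layout (a reading aid; offsets match the kPointerSize-scaled loads further down):

    //   rsp[0]  current index (smi, initially 0)
    //   rsp[1]  array length (smi)
    //   rsp[2]  fixed array of keys
    //   rsp[3]  expected map, or smi 1 (slow check) / smi 0 (proxy)
    //   rsp[4]  the enumerable object
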
@@ -983,17 +1005,22 @@
                             index.scale,
                             FixedArray::kHeaderSize));
 
-  // Get the expected map from the stack or a zero map in the
+  // Get the expected map from the stack or a smi in the
   // permanent slow case into register rdx.
   __ movq(rdx, Operand(rsp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
+  // If not, we may have to filter the key.
   Label update_each;
   __ movq(rcx, Operand(rsp, 4 * kPointerSize));
   __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  __ Cmp(rdx, Smi::FromInt(0));
+  __ j(equal, &update_each, Label::kNear);
+
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
@@ -1047,7 +1074,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(info->language_mode());
     __ Push(info);
     __ CallStub(&stub);
   } else {
@@ -1077,7 +1104,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                 Immediate(0));
@@ -1091,7 +1118,7 @@
     // If no outer scope calls eval, we do not need to check more
     // context extensions.  If we have reached an eval scope, we check
     // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1137,7 +1164,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
+      if (s->calls_non_strict_eval()) {
         // Check that extension is NULL.
         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                 Immediate(0));
@@ -1168,16 +1195,23 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+  if (var->mode() == DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+  } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST) {
+    if (local->mode() == CONST ||
+        local->mode() == CONST_HARMONY ||
+        local->mode() == LET) {
       __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
       __ j(not_equal, done);
-      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      if (local->mode() == CONST) {
+        __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      } else {  // LET || CONST_HARMONY
+        __ Push(var->name());
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
     }
     __ jmp(done);
   }
@@ -1208,23 +1242,63 @@
     case Variable::LOCAL:
     case Variable::CONTEXT: {
       Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
-      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
-        context()->Plug(var);
-      } else {
-        // Let and const need a read barrier.
-        Label done;
-        GetVar(rax, var);
-        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-        __ j(not_equal, &done, Label::kNear);
-        if (var->mode() == Variable::LET) {
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kThrowReferenceError, 1);
-        } else {  // Variable::CONST
-          __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        ASSERT(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
+        } else {
+          // Check that we always have valid source position.
+          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST &&
+              var->initializer_position() < proxy->position();
         }
-        __ bind(&done);
-        context()->Plug(rax);
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          Label done;
+          GetVar(rax, var);
+          __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+          __ j(not_equal, &done, Label::kNear);
+          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ Push(var->name());
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized const bindings outside of harmony mode are unholed.
+            ASSERT(var->mode() == CONST);
+            __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+          }
+          __ bind(&done);
+          context()->Plug(rax);
+          break;
+        }
       }
+      context()->Plug(var);
       break;
     }
 
@@ -1302,10 +1376,11 @@
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
+  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  __ Push(expr->constant_properties());
+  __ Push(constant_properties);
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1313,10 +1388,15 @@
       ? ObjectLiteral::kHasFunction
       : ObjectLiteral::kNoFlags;
   __ Push(Smi::FromInt(flags));
+  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    __ CallStub(&stub);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1350,9 +1430,9 @@
             VisitForAccumulatorValue(value);
             __ Move(rcx, key->handle());
             __ movq(rdx, Operand(rsp, 0));
-            Handle<Code> ic = is_strict_mode()
-                ? isolate()->builtins()->StoreIC_Initialize_Strict()
-                : isolate()->builtins()->StoreIC_Initialize();
+            Handle<Code> ic = is_classic_mode()
+                ? isolate()->builtins()->StoreIC_Initialize()
+                : isolate()->builtins()->StoreIC_Initialize_Strict();
             __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1404,24 +1484,42 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  __ Push(expr->constant_elements());
-  if (expr->constant_elements()->map() ==
-      isolate()->heap()->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    __ CallStub(&stub);
+  __ Push(constant_elements);
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        length);
+    __ CallStub(&stub);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+           FLAG_smi_only_arrays);
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
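
The cloning dispatch above reduces to a small decision tree. A hypothetical mirror of it (names illustrative):

    enum CloneStrategy { COW_STUB, RUNTIME_DEEP, RUNTIME_SHALLOW,
                         CLONE_ELEMENTS_STUB, CLONE_ANY_ELEMENTS_STUB };

    CloneStrategy Choose(bool cow_fast_elements, int depth, int length,
                         int max_cloned_length, bool constant_fast_elements) {
      if (cow_fast_elements) return COW_STUB;         // boilerplate is frozen
      if (depth > 1) return RUNTIME_DEEP;
      if (length > max_cloned_length) return RUNTIME_SHALLOW;
      return constant_fast_elements ? CLONE_ELEMENTS_STUB
                                    : CLONE_ANY_ELEMENTS_STUB;
    }
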
 
@@ -1444,14 +1542,28 @@
     }
     VisitForAccumulatorValue(subexpr);
 
-    // Store the subexpression value in the array's elements.
-    __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
-    __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
-    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-    __ movq(FieldOperand(rbx, offset), result_register());
-
-    // Update the write barrier for the array store.
-    __ RecordWrite(rbx, offset, result_register(), rcx);
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      // Fast-case array literals with ElementsKind of FAST_ELEMENTS cannot
+      // transition, so there is no need to call the runtime stub.
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+      __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+      // Store the subexpression value in the array's elements.
+      __ movq(FieldOperand(rbx, offset), result_register());
+      // Update the write barrier for the array store.
+      __ RecordWriteField(rbx, offset, result_register(), rcx,
+                          kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          INLINE_SMI_CHECK);
+    } else {
+      // Store the subexpression value in the array's elements.
+      __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+      __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+      __ Move(rcx, Smi::FromInt(i));
+      __ Move(rdx, Smi::FromInt(expr->literal_index()));
+      StoreArrayLiteralElementStub stub;
+      __ CallStub(&stub);
+    }
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1582,14 +1694,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1698,9 +1810,9 @@
       __ movq(rdx, rax);
       __ pop(rax);  // Restore value.
       __ Move(rcx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ call(ic);
       break;
     }
@@ -1711,9 +1823,9 @@
       __ movq(rcx, rax);
       __ pop(rdx);
       __ pop(rax);  // Restore value.
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ call(ic);
       break;
     }
@@ -1729,9 +1841,9 @@
     // Global var, const, or let.
     __ Move(rcx, var->name());
     __ movq(rdx, GlobalObjectOperand());
-    Handle<Code> ic = is_strict_mode()
-        ? isolate()->builtins()->StoreIC_Initialize_Strict()
-        : isolate()->builtins()->StoreIC_Initialize();
+    Handle<Code> ic = is_classic_mode()
+        ? isolate()->builtins()->StoreIC_Initialize()
+        : isolate()->builtins()->StoreIC_Initialize_Strict();
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1756,13 +1868,13 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(rax);  // Value.
       __ push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ Push(Smi::FromInt(language_mode()));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
       ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1777,12 +1889,14 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+        __ RecordWriteContextSlot(
+            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
       }
     }
 
-  } else if (var->mode() != Variable::CONST) {
-    // Assignment to var or initializing assignment to let.
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+    // Assignment to var or initializing assignment to let/const
+    // in harmony mode.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, rcx);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1795,14 +1909,15 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+        __ RecordWriteContextSlot(
+            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(rax);  // Value.
       __ push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(strict_mode_flag()));
+      __ Push(Smi::FromInt(language_mode()));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
@@ -1834,9 +1949,9 @@
   } else {
     __ pop(rdx);
   }
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->StoreIC_Initialize()
+      : isolate()->builtins()->StoreIC_Initialize_Strict();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1874,9 +1989,9 @@
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = is_strict_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic = is_classic_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize()
+      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1981,6 +2096,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
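+  // CallFunctionStub expects the callee in rdi; load it from the stack.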
+  __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -1990,8 +2106,7 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
-                                                      int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push a copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(rsp, arg_count * kPointerSize));
@@ -2002,17 +2117,14 @@
   // Push the receiver of the enclosing function and do runtime call.
   __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
 
-  // Push the strict mode flag. In harmony mode every eval call
-  // is a strict mode eval call.
-  StrictModeFlag strict_mode = strict_mode_flag();
-  if (FLAG_harmony_block_scoping) {
-    strict_mode = kStrictMode;
-  }
-  __ Push(Smi::FromInt(strict_mode));
+  // Push the language mode.
+  __ Push(Smi::FromInt(language_mode()));
 
-  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
-                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
-                 : Runtime::kResolvePossiblyDirectEval, 4);
+  // Push the start position of the scope the call resides in.
+  __ Push(Smi::FromInt(scope()->start_position()));
+
+  // Do the runtime call.
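+  // Five arguments: the function, the first argument (or undefined), the
+  // receiver, the language mode, and the scope start position.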
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2043,27 +2155,10 @@
         VisitForStackValue(args->at(i));
       }
 
-      // If we know that eval can only be shadowed by eval-introduced
-      // variables we attempt to load the global eval function directly in
-      // generated code. If we succeed, there is no need to perform a
-      // context lookup in the runtime system.
-      Label done;
-      Variable* var = proxy->var();
-      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
-        Label slow;
-        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
-        // Push the function and resolve eval.
-        __ push(rax);
-        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
-        __ jmp(&done);
-        __ bind(&slow);
-      }
-
       // Push a copy of the function (found below the arguments) and resolve
       // eval.
       __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
-      __ bind(&done);
+      EmitResolvePossiblyDirectEval(arg_count);
 
       // The runtime call returns a pair of values in rax (function) and
       // rdx (receiver). Touch up the stack with the right values.
@@ -2073,6 +2168,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2182,7 +2278,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2194,7 +2291,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ JumpIfSmi(rax, if_true);
   __ jmp(if_false);
 
@@ -2202,7 +2299,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2214,7 +2312,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
   Split(non_negative_smi, if_true, if_false, fall_through);
 
@@ -2222,7 +2320,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2246,14 +2345,15 @@
   __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(below, if_false);
   __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2267,14 +2367,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2290,7 +2391,7 @@
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2298,7 +2399,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2374,12 +2476,13 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2393,14 +2496,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2414,14 +2518,15 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2435,7 +2540,7 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2443,8 +2548,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2467,14 +2572,15 @@
   __ bind(&check_frame_marker);
   __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
          Smi::FromInt(StackFrame::CONSTRUCT));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2490,14 +2596,15 @@
 
   __ pop(rbx);
   __ cmpq(rax, rbx);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in rdx and the formal
@@ -2511,8 +2618,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2534,7 +2641,8 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2545,20 +2653,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, one at each end of the
+  // type range for JS object types. This saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
   // Map is now in rax.
   __ j(below, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ j(equal, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-  __ j(above_equal, &function);
+  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ j(equal, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &non_function_constructor);
@@ -2590,7 +2702,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2598,6 +2710,7 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2610,8 +2723,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
+  ASSERT(expr->arguments()->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2630,9 +2743,12 @@
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
   __ PrepareCallCFunction(1);
 #ifdef _WIN64
-  __ LoadAddress(rcx, ExternalReference::isolate_address());
+  __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+
 #else
-  __ LoadAddress(rdi, ExternalReference::isolate_address());
+  __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
+  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
 #endif
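+  // Passing the global context (rather than the isolate) gives each context
+  // its own random seed.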
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
@@ -2652,9 +2768,10 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2664,9 +2781,10 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2677,7 +2795,8 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2695,8 +2814,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2706,7 +2826,8 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2726,14 +2847,15 @@
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ movq(rdx, rax);
-  __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+  __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2745,7 +2867,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2763,7 +2886,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2771,7 +2895,6 @@
 
   Register object = rbx;
   Register index = rax;
-  Register scratch = rcx;
   Register result = rdx;
 
   __ pop(object);
@@ -2781,7 +2904,6 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
-                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -2810,7 +2932,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2818,8 +2941,7 @@
 
   Register object = rbx;
   Register index = rax;
-  Register scratch1 = rcx;
-  Register scratch2 = rdx;
+  Register scratch = rdx;
   Register result = rax;
 
   __ pop(object);
@@ -2829,8 +2951,7 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch1,
-                                  scratch2,
+                                  scratch,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -2859,7 +2980,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2871,7 +2993,8 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -2883,10 +3006,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2894,10 +3018,11 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2905,10 +3030,23 @@
 }
 
 
-void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2916,8 +3054,9 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
   // Load the argument on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -2925,7 +3064,8 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -2934,18 +3074,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
+  // Check for proxy.
+  Label proxy, done;
+  __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
+  __ j(equal, &proxy);
+
   // InvokeFunction requires the function in rdi. Move it in there.
   __ movq(rdi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(rdi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&proxy);
+  __ push(rax);
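+  // Runtime::kCall expects the callable as the last argument on the stack.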
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2955,7 +3108,8 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3010,14 +3164,33 @@
   __ movq(Operand(index_2, 0), object);
   __ movq(Operand(index_1, 0), temp);
 
-  Label new_space;
-  __ InNewSpace(elements, temp, equal, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   temp,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   not_zero,
+                   &no_remembered_set,
+                   Label::kNear);
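+  // Pages flagged SCAN_ON_SCAVENGE are scanned in full during a scavenge, so
+  // individual slots on them need not be recorded.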
+  // Possible optimization: check that both values are Smis
+  // (OR them together and test the result against the Smi mask).
 
-  __ movq(object, elements);
-  __ RecordWriteHelper(object, index_1, temp);
-  __ RecordWriteHelper(elements, index_2, temp);
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index_1,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index_2,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
+
   // We are done. Drop elements from the stack, and return undefined.
   __ addq(rsp, Immediate(3 * kPointerSize));
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3031,7 +3204,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3087,7 +3261,8 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
 
   Register right = rax;
@@ -3125,7 +3300,8 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3139,7 +3315,7 @@
 
   __ testl(FieldOperand(rax, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
   __ j(zero, if_true);
   __ jmp(if_false);
 
@@ -3147,7 +3323,8 @@
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3163,10 +3340,11 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Label bailout, return_result, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3496,14 +3674,16 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ Push(Smi::FromInt(strict_mode_flag()));
+        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
+            ? kNonStrictMode : kStrictMode;
+        __ Push(Smi::FromInt(strict_mode_flag));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(rax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ Push(var->name());
@@ -3545,17 +3725,41 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
       } else {
-        Label materialize_true, materialize_false;
-        Label* if_true = NULL;
-        Label* if_false = NULL;
-        Label* fall_through = NULL;
-        // Notice that the labels are swapped.
-        context()->PrepareTest(&materialize_true, &materialize_false,
-                               &if_false, &if_true, &fall_through);
-        if (context()->IsTest()) ForwardBailoutToChild(expr);
-        VisitForControl(expr->expression(), if_true, if_false, fall_through);
-        context()->Plug(if_false, if_true);  // Labels swapped.
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+        } else {
+          __ PushRoot(Heap::kTrueValueRootIndex);
+        }
+        __ jmp(&done, Label::kNear);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        if (context()->IsAccumulatorValue()) {
+          __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+        } else {
+          __ PushRoot(Heap::kFalseValueRootIndex);
+        }
+        __ bind(&done);
       }
       break;
     }
@@ -3760,9 +3964,9 @@
     case NAMED_PROPERTY: {
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->StoreIC_Initialize_Strict()
-          : isolate()->builtins()->StoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->StoreIC_Initialize()
+          : isolate()->builtins()->StoreIC_Initialize_Strict();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3777,9 +3981,9 @@
     case KEYED_PROPERTY: {
       __ pop(rcx);
       __ pop(rdx);
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      Handle<Code> ic = is_classic_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize()
+          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3827,20 +4031,25 @@
     context()->Plug(rax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInCurrentContext(expr);
+    VisitInDuplicateContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(expr);
+    VisitForTypeofValue(sub_expr);
   }
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(rax, if_true);
@@ -3875,9 +4084,11 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(rax, if_false);
-    STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
-    Split(above_equal, if_true, if_false, fall_through);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
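+    // Both JS functions and function proxies have typeof "function".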
+    __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+    __ j(equal, if_true);
+    __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(rax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3895,18 +4106,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  Split(equal, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3914,6 +4114,10 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
   Label materialize_true, materialize_false;
@@ -3923,20 +4127,13 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -3945,7 +4142,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ testq(rax, rax);
      // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -3959,33 +4156,25 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = equal;
-          __ pop(rdx);
           break;
         case Token::LT:
           cc = less;
-          __ pop(rdx);
           break;
         case Token::GT:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = less;
-          __ movq(rdx, result_register());
-          __ pop(rax);
+          cc = greater;
          break;
         case Token::LTE:
-          // Reverse left and right sizes to obtain ECMA-262 conversion order.
-          cc = greater_equal;
-          __ movq(rdx, result_register());
-          __ pop(rax);
+          cc = less_equal;
           break;
         case Token::GTE:
           cc = greater_equal;
-          __ pop(rdx);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
+      __ pop(rdx);
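+      // rdx: left operand, rax: right operand (as the compare IC expects).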
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4005,7 +4194,7 @@
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       __ testq(rax, rax);
       Split(cc, if_true, if_false, fall_through);
     }
@@ -4017,8 +4206,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4026,14 +4216,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  if (expr->is_strict()) {
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ CompareRoot(rax, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ j(equal, if_true);
-    __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+    __ CompareRoot(rax, other_nil_value);
     __ j(equal, if_true);
     __ JumpIfSmi(rax, if_false);
     // It can be an undetectable object.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9d55594..3a57753 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -221,7 +221,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ movq(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0);
+  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
 }
 
 
@@ -531,14 +531,12 @@
 
   Register receiver = rdx;
   Register index = rax;
-  Register scratch1 = rbx;
-  Register scratch2 = rcx;
+  Register scratch = rcx;
   Register result = rax;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch1,
-                                          scratch2,
+                                          scratch,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -606,45 +604,40 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, slow_with_tagged_index, fast, array, extra;
+  Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow_with_tagged_index);
   // Get the map from the receiver.
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow_with_tagged_index);
   // Check that the key is a smi.
   __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
   __ SmiToInteger32(rcx, rcx);
 
-  __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
+  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
-  __ CmpInstanceType(rbx, JS_PROXY_TYPE);
-  __ j(equal, &slow);
-  __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow);
+  // Check array bounds.
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   // rax: value
   // rbx: FixedArray
   // rcx: index
-  __ j(above, &fast);
+  __ j(above, &fast_object_with_map_check);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -666,9 +659,20 @@
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   __ j(below_equal, &slow);
   // Increment index to get new length.
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_extra_double);
   __ leal(rdi, Operand(rcx, 1));
   __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  __ jmp(&fast);
+  __ jmp(&fast_object_without_map_check);
+
+  __ bind(&check_extra_double);
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ leal(rdi, Operand(rcx, 1));
+  __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -678,9 +682,6 @@
   // rdx: receiver (a JSArray)
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
@@ -688,30 +689,54 @@
   __ j(below_equal, &extra);
 
   // Fast case: Do the store.
-  __ bind(&fast);
+  __ bind(&fast_object_with_map_check);
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
+  // rdx: receiver (a JSArray)
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
   Label non_smi_value;
+  __ JumpIfNotSmi(rax, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
-  __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
   __ ret(0);
+
   __ bind(&non_smi_value);
-  // Slow case that needs to retain rcx for use by RecordWrite.
-  // Update write barrier for the elements array address.
-  __ movq(rdx, rax);
-  __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
+  // Writing a non-smi value; check whether the array allows non-smi elements.
+  // r9: receiver's map
+  __ CheckFastObjectElements(r9, &slow, Label::kNear);
+  __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ movq(rdx, rax);  // Preserve the value which is returned.
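+  // OMIT_SMI_CHECK is safe here: the value was just checked to be a non-smi.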
+  __ RecordWriteArray(
+      rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ bind(&fast_double_without_map_check);
+  // If the value is a number, store it as a double in the FastDoubleElements
+  // array.
+  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow);
   __ ret(0);
 }
 
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                          int argc,
-                                          Code::Kind kind,
-                                          Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                               int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rdx                      : receiver
@@ -721,7 +746,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_ic_state,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -794,7 +819,7 @@
 
 
 // The generated code falls through if the call should be handled by runtime.
-static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                    : function name
   // rsp[0]                 : return address
@@ -821,10 +846,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm,
-                             int argc,
-                             IC::UtilityId id,
-                             Code::ExtraICState extra_ic_state) {
+void CallICBase::GenerateMiss(MacroAssembler* masm,
+                              int argc,
+                              IC::UtilityId id,
+                              Code::ExtraICState extra_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -846,21 +871,22 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ push(rdx);
-  __ push(rcx);
+    // Push the receiver and the name of the function.
+    __ push(rdx);
+    __ push(rcx);
 
-  // Call the entry.
-  CEntryStub stub(1);
-  __ Set(rax, 2);
-  __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
-  __ CallStub(&stub);
+    // Call the entry.
+    CEntryStub stub(1);
+    __ Set(rax, 2);
+    __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+    __ CallStub(&stub);
 
-  // Move result to rdi and exit the internal frame.
-  __ movq(rdi, rax);
-  __ LeaveInternalFrame();
+    // Move result to rdi and exit the internal frame.
+    __ movq(rdi, rax);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -881,7 +907,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -913,39 +939,6 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
-  // -----------------------------------
-
-  GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc, Code::kNoExtraICState);
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm,
-                          int argc,
-                          Code::ExtraICState extra_ic_state) {
-  // ----------- S t a t e -------------
-  // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
-}
-
-
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                      : function name
@@ -1002,13 +995,14 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  __ EnterInternalFrame();
-  __ push(rcx);  // save the key
-  __ push(rdx);  // pass the receiver
-  __ push(rcx);  // pass the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(rcx);  // restore the key
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rcx);  // save the key
+    __ push(rdx);  // pass the receiver
+    __ push(rcx);  // pass the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(rcx);  // restore the key
+  }
   __ movq(rdi, rax);
   __ jmp(&do_call);
 
@@ -1072,27 +1066,12 @@
   __ JumpIfSmi(rcx, &miss);
   Condition cond = masm->IsObjectStringType(rcx, rax, rax);
   __ j(NegateCondition(cond), &miss);
-  GenerateCallNormal(masm, argc);
+  CallICBase::GenerateNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
 
-void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  // ----------- S t a t e -------------
-  // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
-  // -----------------------------------
-
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
-}
-
-
 static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                              Register object,
                                              Register key,
@@ -1212,7 +1191,12 @@
   __ movq(mapped_location, rax);
   __ lea(r9, mapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx, r9, r8);
+  __ RecordWrite(rbx,
+                 r9,
+                 r8,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in rbx.
@@ -1221,7 +1205,12 @@
   __ movq(unmapped_location, rax);
   __ lea(r9, unmapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx, r9, r8);
+  __ RecordWrite(rbx,
+                 r9,
+                 r8,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -1562,6 +1551,51 @@
 }
 
 
+void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rbx     : target map
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  // Must return the modified receiver in rax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    __ movq(rax, rdx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
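+  // Put the receiver on the stack as the argument for the runtime call,
+  // keeping the return address on top.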
+  __ pop(rbx);
+  __ push(rdx);
+  __ push(rbx);  // return address
+  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
+}
+
+
+void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rbx     : target map
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  // Must return the modified receiver in rax.
+  if (!FLAG_trace_elements_transitions) {
+    Label fail;
+    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+    __ movq(rax, rdx);
+    __ Ret();
+    __ bind(&fail);
+  }
+
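+  // Put the receiver on the stack as the argument for the runtime call,
+  // keeping the return address on top.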
+  __ pop(rbx);
+  __ push(rdx);
+  __ push(rbx);  // return address
+  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
+}
+
+
 #undef __
 
 
@@ -1573,11 +1607,9 @@
     case Token::LT:
       return less;
     case Token::GT:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return less;
+      return greater;
     case Token::LTE:
-      // Reverse left and right operands to obtain ECMA-262 conversion order.
-      return greater_equal;
+      return less_equal;
     case Token::GTE:
       return greater_equal;
     default:
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index b82dc54..82eabac 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -70,6 +70,12 @@
   HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -133,7 +139,7 @@
   // when called as functions (without an explicit receiver
   // object). rcx is zero for method calls and non-zero for function
   // calls.
-  if (info_->is_strict_mode() || info_->is_native()) {
+  if (!info_->is_classic_mode() || info_->is_native()) {
     Label ok;
     __ testq(rcx, rcx);
     __ j(zero, &ok, Label::kNear);
@@ -205,11 +211,8 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering rsi.
-        __ movq(rcx, rsi);
-        __ RecordWrite(rcx, context_offset, rax, rbx);
+        // Update the write barrier. This clobbers rax and rbx.
+        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -260,6 +263,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -322,6 +328,12 @@
 }
 
 
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  return value->Number();
+}
+
+
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   Handle<Object> literal = chunk_->LookupLiteral(op);
   ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
@@ -611,7 +623,7 @@
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
 
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
@@ -1457,39 +1469,51 @@
 }
 
 
-void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
-  if (right->IsConstantOperand()) {
-    int32_t value = ToInteger32(LConstantOperand::cast(right));
-    if (left->IsRegister()) {
-      __ cmpl(ToRegister(left), Immediate(value));
-    } else {
-      __ cmpl(ToOperand(left), Immediate(value));
-    }
-  } else if (right->IsRegister()) {
-    __ cmpl(ToRegister(left), ToRegister(right));
-  } else {
-    __ cmpl(ToRegister(left), ToOperand(right));
-  }
-}
-
-
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  if (instr->is_double()) {
-    // Don't base result on EFLAGS when a NaN is involved. Instead
-    // jump to the false block.
-    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
-  } else {
-    EmitCmpI(left, right);
-  }
-
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
-  EmitBranch(true_block, false_block, cc);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block =
+      EvalComparison(instr->op(), left_val, right_val) ? true_block
+                                                       : false_block;
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Don't base result on EFLAGS when a NaN is involved. Instead
+      // jump to the false block.
+      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      int32_t value;
+      if (right->IsConstantOperand()) {
+        value = ToInteger32(LConstantOperand::cast(right));
+        __ cmpl(ToRegister(left), Immediate(value));
+      } else if (left->IsConstantOperand()) {
+        value = ToInteger32(LConstantOperand::cast(left));
+        if (right->IsRegister()) {
+          __ cmpl(ToRegister(right), Immediate(value));
+        } else {
+          __ cmpl(ToOperand(right), Immediate(value));
+        }
+        // We transposed the operands. Reverse the condition.
+        cc = ReverseCondition(cc);
+      } else {
+        if (right->IsRegister()) {
+          __ cmpl(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmpl(ToRegister(left), ToOperand(right));
+        }
+      }
+    }
+    EmitBranch(true_block, false_block, cc);
+  }
 }
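
Two separate improvements land in DoCmpIDAndBranch: a comparison of two constants is now folded into an unconditional goto, and double comparisons still route NaNs to the false block before the flags are consulted (ucomisd sets the parity flag on unordered operands). A minimal model of the EvalComparison helper under those IEEE-754 semantics (the enum is illustrative):

#include <cassert>
#include <cmath>

enum class Token { LT, GT, LTE, GTE, EQ };

// Ordered comparisons: anything involving a NaN evaluates to false, matching
// the parity_even jump in the emitted code.
bool EvalComparison(Token op, double left, double right) {
  switch (op) {
    case Token::LT:  return left < right;
    case Token::GT:  return left > right;
    case Token::LTE: return left <= right;
    case Token::GTE: return left >= right;
    case Token::EQ:  return left == right;
  }
  return false;
}

int main() {
  double nan = std::nan("");
  assert(EvalComparison(Token::LT, 1.0, 2.0));
  assert(!EvalComparison(Token::LT, nan, 2.0));   // NaN: take the false block
  assert(!EvalComparison(Token::GTE, nan, nan));
}
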
 
 
@@ -1514,30 +1538,33 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null or undefined, and it can't be an undetectable object.
   if (instr->hydrogen()->representation().IsSpecialization() ||
       instr->hydrogen()->type().IsSmi()) {
-    // If the expression is known to untagged or smi, then it's definitely
-    // not null, and it can't be a an undetectable object.
-    // Jump directly to the false block.
     EmitGoto(false_block);
     return;
   }
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  __ CompareRoot(reg, Heap::kNullValueRootIndex);
-  if (instr->is_strict()) {
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ CompareRoot(reg, nil_value);
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, equal);
   } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+    __ CompareRoot(reg, other_nil_value);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1590,6 +1617,30 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsString(reg, temp, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
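
EmitIsString is two checks: a smi early-out, then one compare of the map's instance type against the string range. A hedged model of that shape (the tag bit, boundary value, and layout here are assumptions for illustration, not V8's actual encoding):

#include <cstdint>

constexpr int kFirstNonstringType = 0x80;  // assumed: strings sort below this

struct Map { int instance_type; };
struct HeapObj { const Map* map; };

inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

bool IsString(uintptr_t tagged) {
  if (IsSmi(tagged)) return false;  // the JumpIfSmi fast-out
  const HeapObj* obj = reinterpret_cast<const HeapObj*>(tagged - 1);
  // IsObjectStringType boils down to one range compare on the instance type.
  return obj->map->instance_type < kFirstNonstringType;
}

int main() {
  Map string_map{0x01};
  HeapObj s{&string_map};
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&s) + 1;  // heap-object tag
  return IsString(tagged) ? 0 : 1;
}
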
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1621,6 +1672,21 @@
 }
 
 
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = TokenToCondition(op, false);
+  __ testq(rax, rax);
+
+  EmitBranch(true_block, false_block, condition);
+}
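
The branch works because the compare stub leaves an integer in rax whose sign encodes the relation, so "testq rax, rax" followed by the token's signed condition implements every ordering. A sketch with strcmp standing in for the stub result:

#include <cassert>
#include <cstring>

// strcmp models the CompareIC result: negative means left < right, zero means
// equal, positive means left > right.
bool StringCompareAndBranch(const char* left, const char* right, char op) {
  int r = std::strcmp(left, right);
  switch (op) {  // the condition applied after the testq
    case '<': return r < 0;
    case '>': return r > 0;
    case '=': return r == 0;
    default:  return false;
  }
}

int main() {
  assert(StringCompareAndBranch("abc", "abd", '<'));
  assert(StringCompareAndBranch("b", "a", '>'));
}
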
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1689,30 +1755,39 @@
                                Label* is_false,
                                Handle<String> class_name,
                                Register input,
-                               Register temp) {
+                               Register temp,
+                               Register scratch) {
   __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-  __ j(below, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(above_equal, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+    __ j(below, is_false);
+    __ j(equal, is_true);
+    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+    __ j(equal, is_true);
   } else {
-    __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzxbl(scratch, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ subq(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmpq(scratch, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                               FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ j(above, is_false);
   }
 
+  // The instance type is now known to be in the range
+  // [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE].
   // Check if the constructor in the map is a function.
   __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1741,6 +1816,7 @@
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
   Handle<String> class_name = instr->hydrogen()->class_name();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1749,7 +1825,7 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  EmitClassOfTest(true_label, false_label, class_name, input, temp);
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
 
   EmitBranch(true_block, false_block, equal);
 }
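
The "faster code path" comment refers to a classic trick: after subtracting the lower bound, one unsigned compare covers both ends of the range, because values below the bound wrap around to large unsigned numbers (hence the "above" jump to is_false). Verified exhaustively for an assumed 8-bit type field with illustrative bounds:

#include <cassert>
#include <cstdint>

constexpr uint8_t kFirst = 0xA0;  // assumed range bounds
constexpr uint8_t kLast = 0xB7;

bool TwoCompares(uint8_t type) { return type >= kFirst && type <= kLast; }

bool OneCompare(uint8_t type) {
  // Underflow wraps: anything below kFirst becomes larger than kLast - kFirst.
  return static_cast<uint8_t>(type - kFirst) <= (kLast - kFirst);
}

int main() {
  for (int t = 0; t < 256; ++t) {
    uint8_t type = static_cast<uint8_t>(t);
    assert(TwoCompares(type) == OneCompare(type));
  }
}
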
@@ -1790,9 +1866,8 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1816,9 +1891,10 @@
   Register map = ToRegister(instr->TempAt(0));
   __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  __ movq(kScratchRegister, factory()->the_hole_value(),
-          RelocInfo::EMBEDDED_OBJECT);
-  __ cmpq(map, kScratchRegister);  // Patched to cached map.
+  Handle<JSGlobalPropertyCell> cache_cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+  __ cmpq(map, Operand(kScratchRegister, 0));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Patched to load either true or false.
   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -1900,9 +1976,6 @@
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = TokenToCondition(op, false);
-  if (op == Token::GT || op == Token::LTE) {
-    condition = ReverseCondition(condition);
-  }
   Label true_value, done;
   __ testq(rax, rax);
   __ j(condition, &true_value, Label::kNear);
@@ -1936,7 +2009,7 @@
     __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
     __ movq(result, Operand(result, 0));
   }
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
   }
@@ -1956,25 +2029,26 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register object = ToRegister(instr->TempAt(0));
+  Register address = ToRegister(instr->TempAt(1));
   Register value = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(!value.is(temp));
-  bool check_hole = instr->hydrogen()->check_hole_value();
-  if (!check_hole && value.is(rax)) {
-    __ store_rax(instr->hydrogen()->cell().location(),
-                 RelocInfo::GLOBAL_PROPERTY_CELL);
-    return;
-  }
+  ASSERT(!value.is(object));
+  Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
+
+  __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-  if (check_hole) {
-    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
   }
-  __ movq(Operand(temp, 0), value);
+
+  // Store the value.
+  __ movq(Operand(address, 0), value);
+  // Cells are always rescanned, so no write barrier here.
 }
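
The hole check exists because deleting a global property leaves its cell holding a sentinel hole value rather than unlinking the cell from optimized code; a store through a stale cell must deoptimize so the runtime can repair the property dictionary. A minimal model, with deoptimization represented as an exception and std::optional standing in for the hole:

#include <optional>
#include <stdexcept>

struct GlobalCell {
  std::optional<int> payload;  // std::nullopt models the hole
};

void StoreGlobalCell(GlobalCell& cell, int value, bool requires_hole_check) {
  if (requires_hole_check && !cell.payload.has_value()) {
    throw std::runtime_error("deopt: property was deleted");
  }
  cell.payload = value;  // cells are always rescanned: no write barrier
}

int main() {
  GlobalCell cell{42};
  StoreGlobalCell(cell, 7, true);
  return *cell.payload == 7 ? 0 : 1;
}
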
 
 
@@ -1983,7 +2057,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2001,10 +2075,19 @@
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
   __ movq(ContextOperand(context, instr->slot_index()), value);
-  if (instr->needs_write_barrier()) {
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     int offset = Context::SlotOffset(instr->slot_index());
     Register scratch = ToRegister(instr->TempAt(0));
-    __ RecordWrite(context, offset, value, scratch);
+    __ RecordWriteContextSlot(context,
+                              offset,
+                              value,
+                              scratch,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
   }
 }
 
@@ -2025,7 +2108,7 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   type->LookupInDescriptors(NULL, *name, &lookup);
   ASSERT(lookup.IsProperty() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
@@ -2223,17 +2306,15 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(),
-        instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset);
-    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      offset);
+  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2305,6 +2386,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2466,7 +2548,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  LoadHeapObject(result, instr->hydrogen()->closure());
 }
 
 
@@ -2618,6 +2700,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2821,6 +2904,14 @@
 }
 
 
+void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::TAN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -2860,6 +2951,9 @@
     case kMathSin:
       DoMathSin(instr);
       break;
+    case kMathTan:
+      DoMathTan(instr);
+      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -2909,13 +3003,13 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(rdi));
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ Drop(1);
 }
 
 
@@ -2963,21 +3057,36 @@
   }
 
   // Do the store.
+  HType type = instr->hydrogen()->value()->type();
+  SmiCheck check_needed =
+      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ movq(FieldOperand(object, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, offset, value, temp);
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          temp,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
     __ movq(FieldOperand(temp, offset), value);
-    if (instr->needs_write_barrier()) {
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(temp, offset, value, object);
+      __ RecordWriteField(temp,
+                          offset,
+                          value,
+                          object,
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          check_needed);
     }
   }
 }
@@ -2988,7 +3097,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3025,6 +3134,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3060,6 +3170,13 @@
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    Condition cc = masm()->CheckSmi(value);
+    DeoptimizeIf(NegateCondition(cc), instr->environment());
+  }
+
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3076,12 +3193,20 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    HType type = instr->hydrogen()->value()->type();
+    SmiCheck check_needed =
+        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key, FieldOperand(elements,
                              key,
                              times_pointer_size,
                              FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed);
   }
 }
 
@@ -3110,13 +3235,54 @@
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic = instr->strict_mode()
+  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register new_map_reg = ToRegister(instr->new_map_reg());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = from_map->elements_kind();
+  ElementsKind to_kind = to_map->elements_kind();
+
+  Label not_applicable;
+  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable);
+  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+    // Write barrier.
+    ASSERT_NE(instr->temp_reg(), NULL);
+    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
+                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+      to_kind == FAST_DOUBLE_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(rdx));
+    ASSERT(new_map_reg.is(rbx));
+    __ movq(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
+             RelocInfo::CODE_TARGET, instr);
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+    Register fixed_object_reg = ToRegister(instr->temp_reg());
+    ASSERT(fixed_object_reg.is(rdx));
+    ASSERT(new_map_reg.is(rbx));
+    __ movq(fixed_object_reg, object_reg);
+    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
+             RelocInfo::CODE_TARGET, instr);
+  } else {
+    UNREACHABLE();
+  }
+  __ bind(&not_applicable);
+}
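
Of the three transitions handled above, only smi-only to object can keep the backing store, since every smi is already a valid tagged value; it is therefore a pure map swap (with a barrier on the map word). The other two change the element representation and call out to builtins. A compact statement of that dispatch, with illustrative names:

#include <cstdio>

enum class ElementsKind { kSmiOnly, kObject, kDouble };

const char* TransitionPlan(ElementsKind from, ElementsKind to) {
  if (from == ElementsKind::kSmiOnly && to == ElementsKind::kObject)
    return "swap the map in place; elements stay valid";
  if (from == ElementsKind::kSmiOnly && to == ElementsKind::kDouble)
    return "call out: unbox every smi into a double backing store";
  if (from == ElementsKind::kDouble && to == ElementsKind::kObject)
    return "call out: box every double as a heap number";
  return "unreachable";
}

int main() {
  std::puts(TransitionPlan(ElementsKind::kSmiOnly, ElementsKind::kObject));
}
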
+
+
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   EmitPushTaggedOperand(instr->left());
   EmitPushTaggedOperand(instr->right());
@@ -3131,85 +3297,19 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ testb(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ testb(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ addq(index, result);
-  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
-                 Heap::kEmptyStringRootIndex);
-  __ j(not_equal, deferred->entry());
-  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result, Immediate(kStringRepresentationMask));
-  __ j(not_zero, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ testb(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzxwl(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ movzxbl(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
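
The deleted inline sequence (slice and cons unwrapping followed by an encoding dispatch) now lives in the shared StringCharLoadGenerator. Its structure, modeled under simplified assumed shapes; the real generator instead jumps to the deferred runtime path for cons strings with a non-empty second part and for any other unsupported representation:

#include <cstdint>
#include <vector>

enum class Shape { kSeqOneByte, kSeqTwoByte, kCons, kSliced };

struct Str {
  Shape shape;
  std::vector<uint16_t> chars;  // backing store for sequential shapes
  const Str* first = nullptr;   // cons: left child (right side assumed empty)
  const Str* parent = nullptr;  // slice: the string being viewed
  int offset = 0;               // slice: start position inside the parent
};

// Unwrap indirect shapes, then index the sequential backing store.
uint16_t CharCodeAt(const Str* s, int index) {
  while (true) {
    if (s->shape == Shape::kSliced) {
      index += s->offset;  // translate into the parent's index space
      s = s->parent;
    } else if (s->shape == Shape::kCons) {
      s = s->first;        // flat cons: all characters live on the left
    } else {
      break;               // sequential: one- or two-byte load
    }
  }
  return s->chars[static_cast<size_t>(index)];
}

int main() {
  Str base{Shape::kSeqTwoByte, {'h', 'i', '!'}, nullptr, nullptr, 0};
  Str slice{Shape::kSliced, {}, nullptr, &base, 1};
  return CharCodeAt(&slice, 0) == 'i' ? 0 : 1;
}
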
 
@@ -3251,6 +3351,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3327,6 +3428,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3422,16 +3524,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3480,6 +3572,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3716,6 +3818,11 @@
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
+  ASSERT_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+
   // Set up the parameters to the stub/runtime call.
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
@@ -3736,26 +3843,108 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        constant_elements_kind == FAST_DOUBLE_ELEMENTS
+        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+                            Register result,
+                            Register source,
+                            int* offset) {
+  ASSERT(!source.is(rcx));
+  ASSERT(!result.is(rcx));
+
+  // Increase the offset so that subsequent objects end up right after
+  // this one.
+  int current_offset = *offset;
+  int size = object->map()->instance_size();
+  *offset += size;
+
+  // Copy object header.
+  ASSERT(object->properties()->length() == 0);
+  ASSERT(object->elements()->length() == 0 ||
+         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+  int inobject_properties = object->map()->inobject_properties();
+  int header_size = size - inobject_properties * kPointerSize;
+  for (int i = 0; i < header_size; i += kPointerSize) {
+    __ movq(rcx, FieldOperand(source, i));
+    __ movq(FieldOperand(result, current_offset + i), rcx);
+  }
+
+  // Copy in-object properties.
+  for (int i = 0; i < inobject_properties; i++) {
+    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ lea(rcx, Operand(result, *offset));
+      __ movq(FieldOperand(result, total_offset), rcx);
+      LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+      __ movq(FieldOperand(result, total_offset), rcx);
+    } else {
+      __ movq(rcx, value, RelocInfo::NONE);
+      __ movq(FieldOperand(result, total_offset), rcx);
+    }
+  }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+  int size = instr->hydrogen()->total_size();
+
+  // Allocate all objects that are part of the literal in one big
+  // allocation. This avoids multiple limit checks.
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ Push(Smi::FromInt(size));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+  __ bind(&allocated);
+  int offset = 0;
+  LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
+  ASSERT_EQ(size, offset);
+}
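
DoObjectLiteralFast above allocates the whole boilerplate graph in one allocation and lets EmitDeepCopy lay nested objects out back to back: the running *offset is what allows a parent to patch its pointer slot to a child's final address before the child is copied, and the final ASSERT_EQ checks that the copy consumed exactly the allocated size. A simplified model of that bookkeeping (layout and header size are illustrative assumptions):

#include <cstddef>
#include <vector>

struct Boilerplate {
  int header_bytes;                          // assumed fixed-size header
  std::vector<const Boilerplate*> children;  // nested literal objects only

  int Size() const {
    return header_bytes + static_cast<int>(children.size() * sizeof(char*));
  }
};

void DeepCopy(const Boilerplate* obj, char* result, int* offset) {
  int current = *offset;
  *offset += obj->Size();  // children land right after this object
  char** slots = reinterpret_cast<char**>(result + current + obj->header_bytes);
  for (std::size_t i = 0; i < obj->children.size(); ++i) {
    slots[i] = result + *offset;  // patch the slot to the child's future home
    DeepCopy(obj->children[i], result, offset);
  }
}

int main() {
  Boilerplate leaf{16, {}};
  Boilerplate root{16, {&leaf}};
  std::vector<char> heap(static_cast<std::size_t>(root.Size() + leaf.Size()));
  int offset = 0;
  DeepCopy(&root, heap.data(), &offset);
  return offset == root.Size() + leaf.Size() ? 0 : 1;  // mirrors the ASSERT_EQ
}
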
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+  Handle<FixedArray> constant_properties =
+      instr->hydrogen()->constant_properties();
+
   // Set up the parameters to the stub/runtime call.
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
-  __ Push(instr->hydrogen()->constant_properties());
-  __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+  __ Push(constant_properties);
+  int flags = instr->hydrogen()->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  flags |= instr->hydrogen()->has_function()
+      ? ObjectLiteral::kHasFunction
+      : ObjectLiteral::kNoFlags;
+  __ Push(Smi::FromInt(flags));
 
   // Pick the right runtime function to call.
+  int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else {
+  } else if (flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  } else {
+    FastCloneShallowObjectStub stub(properties_count);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
@@ -3825,8 +4014,7 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
+    FastNewClosureStub stub(shared_info->language_mode());
     __ Push(shared_info);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -3866,12 +4054,11 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
-                                                  input,
-                                                  instr->type_literal());
-
-  EmitBranch(true_block, false_block, final_branch_condition);
+  Condition final_branch_condition =
+      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
+  if (final_branch_condition != no_condition) {
+    EmitBranch(true_block, false_block, final_branch_condition);
+  }
 }
 
 
@@ -3916,9 +4103,12 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
-    final_branch_condition = above_equal;
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -3936,7 +4126,6 @@
     final_branch_condition = zero;
 
   } else {
-    final_branch_condition = never;
     __ jmp(false_label);
   }
 
@@ -4051,6 +4240,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 43c045f..7bd7fe6 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -78,6 +78,7 @@
   XMMRegister ToDoubleRegister(LOperand* op) const;
   bool IsInteger32Constant(LConstantOperand* op) const;
   int ToInteger32(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
   bool IsTaggedConstant(LConstantOperand* op) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op) const;
@@ -126,8 +127,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+  StrictModeFlag strict_mode_flag() const {
+    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -140,7 +141,8 @@
                        Label* if_false,
                        Handle<String> class_name,
                        Register input,
-                       Register temporary);
+                       Register temporary,
+                       Register scratch);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
   int GetParameterCount() const { return scope()->num_parameters(); }
@@ -189,9 +191,8 @@
                                int argc,
                                LInstruction* instr);
 
-
   // Generate a direct call to a known function.  Expects the function
-  // to be in edi.
+  // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
@@ -230,6 +231,7 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -248,7 +250,6 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
-  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         XMMRegister result,
                         bool deoptimize_on_undefined,
@@ -257,8 +258,10 @@
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label, Label* false_label,
-                         Register input, Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -267,6 +270,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
@@ -280,6 +290,13 @@
   // register, or a stack slot operand.
   void EmitPushTaggedOperand(LOperand* operand);
 
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset);
+
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
@@ -346,16 +363,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -366,6 +387,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 5fc5646..b486fae 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -214,10 +214,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -229,6 +230,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -243,6 +251,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" ");
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -446,6 +462,12 @@
 }
 
 
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
   LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
@@ -706,7 +728,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -736,7 +760,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasSideEffects()) {
+  if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -748,7 +772,8 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -806,28 +831,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBit(Token::Value op,
-                                   HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new LBitI(op, left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), rdx);
-    LOperand* right = UseFixed(instr->right(), rax);
-    LArithmeticT* result = new LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, rax), instr);
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsTagged()) {
@@ -989,10 +992,13 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
@@ -1002,7 +1008,6 @@
                                           argument_count_,
                                           value_count,
                                           outer);
-  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1011,7 +1016,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
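
Threading argument_index_accumulator through the recursion matters for inlined functions: the outer environment is translated first, so argument slots in inner environments must continue numbering where the outer frame stopped; the removed per-call local restarted at zero for every nested environment. A minimal sketch:

#include <vector>

struct HEnv {
  const HEnv* outer;
  int pushed_argument_count;
};

// Collects the LArgument indices an environment chain would receive.
void CreateEnvironment(const HEnv* env, int* argument_index_accumulator,
                       std::vector<int>* indices) {
  if (env == nullptr) return;
  CreateEnvironment(env->outer, argument_index_accumulator, indices);
  for (int i = 0; i < env->pushed_argument_count; ++i) {
    indices->push_back((*argument_index_accumulator)++);  // no restart at zero
  }
}

int main() {
  HEnv outer{nullptr, 2};
  HEnv inner{&outer, 1};
  std::vector<int> indices;
  int acc = 0;
  CreateEnvironment(&inner, &acc, &indices);
  return indices.back() == 2 ? 0 : 1;  // inner frame continues at 2
}
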
@@ -1201,8 +1206,9 @@
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new LCallFunction();
+  LCallFunction* result = new LCallFunction(function);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1228,8 +1234,24 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
-  return DoBit(Token::BIT_AND, instr);
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), rdx);
+    LOperand* right = UseFixed(instr->right(), rax);
+    LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
 }
 
 
@@ -1242,16 +1264,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
-  return DoBit(Token::BIT_OR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
-  return DoBit(Token::BIT_XOR, instr);
-}
-
-
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1391,12 +1403,10 @@
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  bool reversed = (op == Token::GT || op == Token::LTE);
-  LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
-  LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
   LCmpT* result = new LCmpT(left, right);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
@@ -1408,15 +1418,22 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     return new LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
+    LOperand* left;
+    LOperand* right;
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      left = UseRegisterOrConstantAtStart(instr->left());
+      right = UseRegisterOrConstantAtStart(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
     return new LCmpIDAndBranch(left, right);
   }
 }
@@ -1436,10 +1453,10 @@
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
@@ -1449,6 +1466,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   return new LIsSmiAndBranch(Use(instr->value()));
@@ -1463,6 +1487,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
+  LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
@@ -1489,6 +1526,7 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister(),
                                    TempRegister());
 }
 
@@ -1716,7 +1754,7 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
   LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1731,8 +1769,10 @@
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
-  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+      new LStoreGlobalCell(UseTempRegister(instr->value()),
+                           TempRegister(),
+                           TempRegister());
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1948,6 +1988,27 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, temp_reg);
+    return DefineSameAsFirst(result);
+  } else {
+    LOperand* object = UseFixed(instr->object(), rax);
+    LOperand* fixed_object_reg = FixedTemp(rdx);
+    LOperand* new_map_reg = FixedTemp(rbx);
+    LTransitionElementsKind* result =
+        new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
@@ -2010,8 +2071,14 @@
 }
 
 
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralFast, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+    HObjectLiteralGeneric* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, rax), instr);
 }
 
 
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index d169bf6..c21223b 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -107,10 +107,12 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -132,7 +134,8 @@
   V(NumberTagD)                                 \
   V(NumberTagI)                                 \
   V(NumberUntagD)                               \
-  V(ObjectLiteral)                              \
+  V(ObjectLiteralFast)                          \
+  V(ObjectLiteralGeneric)                       \
   V(OsrEntry)                                   \
   V(OuterContext)                               \
   V(Parameter)                                  \
@@ -162,6 +165,7 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
+  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
@@ -609,17 +613,18 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNullAndBranch(LOperand* value, LOperand* temp) {
+  LIsNilAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -638,6 +643,20 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -666,6 +685,23 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -705,11 +741,12 @@
 };
 
 
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
  public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
     temps_[0] = temp;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -790,18 +827,15 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
+  LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return hydrogen()->op(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-
- private:
-  Token::Value op_;
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
 };
 
 
@@ -1197,11 +1231,12 @@
 };
 
 
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
  public:
-  explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
+  explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
-    temps_[0] = temp;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1223,7 +1258,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1257,7 +1292,6 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
-  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1274,7 +1308,9 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1372,14 +1408,17 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
  public:
-  LCallFunction() {}
+  explicit LCallFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
-  int arity() const { return hydrogen()->argument_count() - 2; }
+  LOperand* function() { return inputs_[0]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
@@ -1548,7 +1587,6 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
-  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1568,7 +1606,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
 };
 
 
@@ -1653,7 +1691,31 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  bool strict_mode() { return hydrogen()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+};
+
+
+class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* new_map_temp,
+                          LOperand* temp_reg) {
+    inputs_[0] = object;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp_reg;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_reg() { return temps_[0]; }
+  LOperand* temp_reg() { return temps_[1]; }
+  Handle<Map> original_map() { return hydrogen()->original_map(); }
+  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
 };
 
 
@@ -1828,10 +1890,17 @@
 };
 
 
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
 };
 
 
@@ -2146,12 +2215,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8fcad23..caca628 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -44,6 +44,7 @@
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
+      has_frame_(false),
       root_array_available_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -54,7 +55,7 @@
 
 static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
   Address roots_register_value = kRootRegisterBias +
-      reinterpret_cast<Address>(isolate->heap()->roots_address());
+      reinterpret_cast<Address>(isolate->heap()->roots_array_start());
   intptr_t delta = other.address() - roots_register_value;
   return delta;
 }
@@ -196,28 +197,47 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register addr,
+                                         Register scratch,
+                                         SaveFPRegsMode save_fp,
+                                         RememberedSetFinalAction and_then) {
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
   }
-
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, Immediate(~Page::kPageAlignmentMask));
-
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shrl(addr, Immediate(Page::kRegionSizeLog2));
-  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
-  // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // Load store buffer top.
+  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Store pointer to buffer.
+  movq(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  addq(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Call stub on end of buffer.
+  Label done;
+  // Check for end of buffer.
+  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
 }
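The fast path above appends the slot address to the store buffer and only calls the overflow stub once the bumped top pointer crosses the overflow bit. A hypothetical C-level equivalent, with the buffer layout and bit value assumed rather than taken from V8's headers:

    #include <stdint.h>

    // Assumed stand-in for StoreBuffer::kStoreBufferOverflowBit.
    static const uintptr_t kStoreBufferOverflowBitSketch = 1u << 15;

    static void RememberedSetInsertSketch(uintptr_t** store_buffer_top,
                                          uintptr_t* slot,
                                          void (*overflow_stub)()) {
      **store_buffer_top = reinterpret_cast<uintptr_t>(slot);  // Store the slot.
      ++*store_buffer_top;                                     // Bump the top.
      // The buffer is aligned so that running past the end sets this bit.
      if (reinterpret_cast<uintptr_t>(*store_buffer_top) &
          kStoreBufferOverflowBitSketch) {
        overflow_stub();  // Stands in for calling StoreBufferOverflowStub.
      }
    }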
 
 
@@ -225,7 +245,7 @@
                                 Register scratch,
                                 Condition cc,
                                 Label* branch,
-                                Label::Distance near_jump) {
+                                Label::Distance distance) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address.  We load it as an external reference in
@@ -240,7 +260,7 @@
     }
     movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpq(scratch, kScratchRegister);
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
@@ -252,35 +272,88 @@
       lea(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   }
 }
 
 
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register index) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+  ASSERT(!value.is(rsi) && !dst.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of smis and stores into the young generation.
+  // catch stores of Smis.
   Label done;
-  JumpIfSmi(value, &done);
 
-  RecordWriteNonSmi(object, offset, value, index);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors. This clobbering repeats the
-  // clobbering done inside RecordWriteNonSmi but it's necessary to
-  // avoid having the fast case for smis leave the registers
-  // unchanged.
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+  }
+}
+
+
+void MacroAssembler::RecordWriteArray(Register object,
+                                      Register value,
+                                      Register index,
+                                      SaveFPRegsMode save_fp,
+                                      RememberedSetAction remembered_set_action,
+                                      SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Array access: calculate the destination address. Index is not a smi.
+  Register dst = index;
+  lea(dst, Operand(object, index, times_pointer_size,
+                   FixedArray::kHeaderSize - kHeapObjectTag));
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
@@ -289,90 +362,72 @@
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+  ASSERT(!value.is(rsi) && !address.is(rsi));
+
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmpq(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
-  JumpIfSmi(value, &done);
 
-  InNewSpace(object, value, equal, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done);
+  }
 
-  RecordWriteHelper(object, address, value);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
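The two CheckPageFlag tests above are what make the new barrier cheap: the RecordWriteStub only runs when the value's page may contain interesting pointer targets and the host object's page may contain interesting pointer sources. A sketch of that filter, with PageSketch standing in for MemoryChunk and the masks passed as parameters:

    #include <stdint.h>

    struct PageSketch { int flags; };

    // Pages are power-of-two aligned, so masking the low bits of any
    // address inside a page yields its header.
    static PageSketch* PageOfSketch(uintptr_t addr, uintptr_t page_mask) {
      return reinterpret_cast<PageSketch*>(addr & ~page_mask);
    }

    static bool BarrierNeededSketch(uintptr_t host, uintptr_t value,
                                    int to_here_mask, int from_here_mask,
                                    uintptr_t page_mask) {
      // Both tests must pass before the RecordWriteStub is worth calling.
      return (PageOfSketch(value, page_mask)->flags & to_here_mask) != 0 &&
             (PageOfSketch(host, page_mask)->flags & from_here_mask) != 0;
    }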
 
 
-void MacroAssembler::RecordWriteNonSmi(Register object,
-                                       int offset,
-                                       Register scratch,
-                                       Register index) {
-  Label done;
-
-  if (emit_debug_code()) {
-    Label okay;
-    JumpIfNotSmi(object, &okay, Label::kNear);
-    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
-    bind(&okay);
-
-    if (offset == 0) {
-      // index must be int32.
-      Register tmp = index.is(rax) ? rbx : rax;
-      push(tmp);
-      movl(tmp, index);
-      cmpq(tmp, index);
-      Check(equal, "Index register for RecordWrite must be untagged int32.");
-      pop(tmp);
-    }
-  }
-
-  // Test that the object address is not in the new space. We cannot
-  // update page dirty marks for new space pages.
-  InNewSpace(object, scratch, equal, &done);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = index;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.
-    lea(dst, FieldOperand(object,
-                          index,
-                          times_pointer_size,
-                          FixedArray::kHeaderSize));
-  }
-  RecordWriteHelper(object, dst, scratch);
-
-  bind(&done);
-
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-  }
-}
-
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (emit_debug_code()) Check(cc, msg);
 }
@@ -400,7 +455,7 @@
   Label L;
   j(cc, &L, Label::kNear);
   Abort(msg);
-  // will not return here
+  // Control will not return here.
   bind(&L);
 }
 
@@ -448,9 +503,6 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
-
   push(rax);
   movq(kScratchRegister, p0, RelocInfo::NONE);
   push(kScratchRegister);
@@ -458,52 +510,44 @@
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE);
   push(kScratchRegister);
-  CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
+  // Control will not return here.
   int3();
 }
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
-MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  MaybeObject* result = stub->TryGetCode();
-  if (!result->IsFailure()) {
-    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
-         RelocInfo::CODE_TARGET);
-  }
-  return result;
-}
-
-
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  MaybeObject* result = stub->TryGetCode();
-  if (!result->IsFailure()) {
-    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
-        RelocInfo::CODE_TARGET);
-  }
-  return result;
-}
-
-
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -540,18 +584,11 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(rax, function->nargs);
   LoadAddress(rbx, ExternalReference(function, isolate()));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
-                                            int num_arguments) {
-  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -573,26 +610,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
-                                            int num_arguments) {
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    // Since we did not call the stub, there was no allocation failure.
-    // Return some non-failure object.
-    return HEAP->undefined_value();
-  }
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(rax, num_arguments);
-  LoadAddress(rbx, ExternalReference(f, isolate()));
-  CEntryStub ces(f->result_size);
-  return TryCallStub(&ces);
-}
-
-
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   Set(rax, num_arguments);
@@ -622,24 +639,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallExternalReference(
-    const ExternalReference& ext, int num_arguments, int result_size) {
-  // ----------- S t a t e -------------
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : argument num_arguments - 1
-  //  ...
-  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
-  // -----------------------------------
-
-  // TODO(1236192): Most runtime routines don't need the number of
-  // arguments passed in because it is constant. At some point we
-  // should remove this need and make the runtime routine entry code
-  // smarter.
-  Set(rax, num_arguments);
-  return TryJumpToExternalReference(ext, result_size);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -649,15 +648,6 @@
 }
 
 
-MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
-                                                int num_arguments,
-                                                int result_size) {
-  return TryTailCallExternalReference(ExternalReference(fid, isolate()),
-                                      num_arguments,
-                                      result_size);
-}
-
-
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that fits into int.
@@ -680,8 +670,8 @@
 }
 
 
-MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ApiFunction* function, int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
+                                              int stack_space) {
   Label empty_result;
   Label prologue;
   Label promote_scheduled_exception;
@@ -711,8 +701,7 @@
   movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
   addl(Operand(base_reg, kLevelOffset), Immediate(1));
   // Call the api function!
-  movq(rax,
-       reinterpret_cast<int64_t>(function->address()),
+  movq(rax, reinterpret_cast<int64_t>(function_address),
        RelocInfo::RUNTIME_ENTRY);
   call(rax);
 
@@ -744,11 +733,7 @@
   ret(stack_space * kPointerSize);
 
   bind(&promote_scheduled_exception);
-  MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
-                                           0, 1);
-  if (result->IsFailure()) {
-    return result;
-  }
+  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
 
   bind(&empty_result);
   // It was zero; the result is undefined.
@@ -769,8 +754,6 @@
   call(rax);
   movq(rax, prev_limit_reg);
   jmp(&leave_exit_frame);
-
-  return result;
 }
 
 
@@ -783,20 +766,11 @@
 }
 
 
-MaybeObject* MacroAssembler::TryJumpToExternalReference(
-    const ExternalReference& ext, int result_size) {
-  // Set the entry point and jump to the C entry runtime stub.
-  LoadAddress(rbx, ext);
-  CEntryStub ces(result_size);
-  return TryTailCallStub(&ces);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -825,6 +799,57 @@
 }
 
 
+static const Register saved_regs[] =
+    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1,
+                                     Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow, so there is no need
+  // to store the registers in any particular way, but we do have to save and
+  // restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
+  }
+  // r12 through r15 are callee-saved on all platforms, so they need no
+  // saving here.
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(rsp, i * kDoubleSize), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+                                    Register exclusion1,
+                                    Register exclusion2,
+                                    Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(rsp, i * kDoubleSize));
+    }
+    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}
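Note that PopCallerSaved walks saved_regs in reverse so the pops mirror the pushes exactly, and the exclusion registers let a caller keep results live across the pair. A hypothetical emission sequence (masm is a MacroAssembler*):

    // rax stays live across the C call because it is excluded from both halves.
    masm->PushCallerSaved(kSaveFPRegs, rax);
    // ... emit a call that may clobber caller-saved registers ...
    masm->PopCallerSaved(kSaveFPRegs, rax);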
+
+
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
@@ -2236,6 +2261,13 @@
 }
 
 
+void MacroAssembler::TestBit(const Operand& src, int bits) {
+  int byte_offset = bits / kBitsPerByte;
+  int bit_in_byte = bits & (kBitsPerByte - 1);
+  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+}
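TestBit narrows the operation to a single byte so the immediate always fits; bit 13, for example, becomes byte 1, bit 5. The same arithmetic in plain C++:

    static bool TestBitSketch(const unsigned char* base, int bits) {
      int byte_offset = bits / 8;   // kBitsPerByte == 8.
      int bit_in_byte = bits & 7;   // kBitsPerByte - 1.
      return ((base[byte_offset] >> bit_in_byte) & 1) != 0;
    }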
+
+
 void MacroAssembler::Jump(ExternalReference ext) {
   LoadAddress(kScratchRegister, ext);
   jmp(kScratchRegister);
@@ -2385,86 +2417,105 @@
 
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
-                                    HandlerType type) {
+                                    HandlerType type,
+                                    int handler_index) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // The pc (return address) is already on TOS.  This code pushes state,
-  // frame pointer, context, and current handler.
+  // We will build up the handler from the bottom by pushing on the stack.
+  // First compute the state and push the frame pointer and context.
+  unsigned state = StackHandler::OffsetField::encode(handler_index);
   if (try_location == IN_JAVASCRIPT) {
-    if (type == TRY_CATCH_HANDLER) {
-      push(Immediate(StackHandler::TRY_CATCH));
-    } else {
-      push(Immediate(StackHandler::TRY_FINALLY));
-    }
     push(rbp);
     push(rsi);
+    state |= (type == TRY_CATCH_HANDLER)
+        ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+        : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
   } else {
     ASSERT(try_location == IN_JS_ENTRY);
-    // The frame pointer does not point to a JS frame so we save NULL
-    // for rbp. We expect the code throwing an exception to check rbp
-    // before dereferencing it to restore the context.
-    push(Immediate(StackHandler::ENTRY));
+    // The frame pointer does not point to a JS frame so we save NULL for
+    // rbp. We expect the code throwing an exception to check rbp before
+    // dereferencing it to restore the context.
     push(Immediate(0));  // NULL frame pointer.
     Push(Smi::FromInt(0));  // No context.
+    state |= StackHandler::KindField::encode(StackHandler::ENTRY);
   }
-  // Save the current handler.
-  Operand handler_operand =
-      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
-  push(handler_operand);
-  // Link this handler.
-  movq(handler_operand, rsp);
+
+  // Push the state and the code object.
+  push(Immediate(state));
+  Push(CodeObject());
+
+  // Link the current handler as the next handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  push(ExternalOperand(handler_address));
+  // Set this new handler as the current one.
+  movq(ExternalOperand(handler_address), rsp);
 }
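The state word pushed above packs the handler-table index and the handler kind into one immediate. A sketch of the encoding, assuming the kind occupies the low kKindWidth bits, as the later shift in JumpToHandlerEntry implies:

    // Stand-in for StackHandler::OffsetField::encode | KindField::encode.
    static unsigned EncodeHandlerStateSketch(int handler_index, unsigned kind,
                                             int kind_width) {
      return (static_cast<unsigned>(handler_index) << kind_width) | kind;
    }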
 
 
 void MacroAssembler::PopTryHandler() {
-  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
-  // Unlink this handler.
-  Operand handler_operand =
-      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
-  pop(handler_operand);
-  // Remove the remaining fields.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  pop(ExternalOperand(handler_address));
   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
 }
 
 
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // rax = exception, rdi = code object, rdx = state.
+  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+  shr(rdx, Immediate(StackHandler::kKindWidth));
+  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+  SmiToInteger64(rdx, rdx);
+  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+  jmp(rdi);
+}
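JumpToHandlerEntry decodes that same word: shift out the kind bits to recover the handler-table index, load the code offset stored there, and jump into the code object. A C-level sketch with the smi tagging of table entries elided:

    // Field names and layout assumed; the real table stores smi-tagged offsets.
    static const unsigned char* HandlerEntrySketch(
        const unsigned char* code_header_start,  // Code::kHeaderSize applied.
        const int* handler_table,                // Untagged code offsets.
        unsigned offset_and_state,
        int kind_width) {
      int index = offset_and_state >> kind_width;  // Drop the kind bits.
      return code_header_start + handler_table[index];
    }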
+
+
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // Keep thrown value in rax.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
-
+  // Drop the stack pointer to the top of the top handler.
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  Operand handler_operand = ExternalOperand(handler_address);
-  movq(rsp, handler_operand);
-  // get next in chain
-  pop(handler_operand);
+  movq(rsp, ExternalOperand(handler_address));
+  // Restore the next handler.
+  pop(ExternalOperand(handler_address));
+
+  // Remove the code object and state, compute the handler address in rdi.
+  pop(rdi);  // Code object.
+  pop(rdx);  // Offset and state.
+
+  // Restore the context and frame pointer.
   pop(rsi);  // Context.
   pop(rbp);  // Frame pointer.
-  pop(rdx);  // State.
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
-  // of them.
+  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
+  // rbp or rsi.
   Label skip;
-  cmpq(rdx, Immediate(StackHandler::ENTRY));
-  j(equal, &skip, Label::kNear);
+  testq(rsi, rsi);
+  j(zero, &skip, Label::kNear);
   movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
   bind(&skip);
 
-  ret(0);
+  JumpToHandlerEntry();
 }
 
 
@@ -2472,40 +2523,17 @@
                                       Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-  // Keep thrown value in rax.
-  if (!value.is(rax)) {
-    movq(rax, value);
-  }
-  // Fetch top stack handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  Load(rsp, handler_address);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
-  bind(&loop);
-  // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kStateOffset;
-  cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done, Label::kNear);
-  // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kNextOffset;
-  movq(rsp, Operand(rsp, kNextOffset));
-  jmp(&loop);
-  bind(&done);
-
-  // Set the top handler address to next handler past the current ENTRY handler.
-  Operand handler_operand = ExternalOperand(handler_address);
-  pop(handler_operand);
-
+  // The exception is expected in rax.
   if (type == OUT_OF_MEMORY) {
     // Set external caught exception to false.
-    ExternalReference external_caught(
-        Isolate::kExternalCaughtExceptionAddress, isolate());
+    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                      isolate());
     Set(rax, static_cast<int64_t>(false));
     Store(external_caught, rax);
 
@@ -2514,16 +2542,38 @@
                                         isolate());
     movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
     Store(pending_exception, rax);
+  } else if (!value.is(rax)) {
+    movq(rax, value);
   }
 
-  // Discard the context saved in the handler and clear the context pointer.
-  pop(rdx);
-  Set(rsi, 0);
+  // Drop the stack pointer to the top of the top stack handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+  Load(rsp, handler_address);
 
-  pop(rbp);  // Restore frame pointer.
-  pop(rdx);  // Discard state.
+  // Unwind the handlers until the top ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind, Label::kNear);
+  bind(&fetch_next);
+  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
 
-  ret(0);
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::ENTRY == 0);
+  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
+        Immediate(StackHandler::KindField::kMask));
+  j(not_zero, &fetch_next);
+
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(ExternalOperand(handler_address));
+
+  // Remove the code object and state, compute the handler address in rdi.
+  pop(rdi);  // Code object.
+  pop(rdx);  // Offset and state.
+
+  // Clear the context pointer and frame pointer (0 was saved in the handler).
+  pop(rsi);
+  pop(rbp);
+
+  JumpToHandlerEntry();
 }
 
 
@@ -2567,13 +2617,91 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastElementValue));
+  j(above, fail, distance);
+}
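The two compares above bracket bit_field2: values at or below the smi-only maximum fail (those maps should take the smi-only path), and values above the fast maximum fail (dictionary or external elements). In plain C++:

    // Sketch of the range test: accept only values strictly above the
    // smi-only maximum and at or below the fast-elements maximum.
    static bool IsFastObjectElementsSketch(int bit_field2,
                                           int max_smi_only_value,
                                           int max_fast_value) {
      return bit_field2 > max_smi_only_value && bit_field2 <= max_fast_value;
    }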
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register index,
+    XMMRegister xmm_scratch,
+    Label* fail) {
+  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmpl(FieldOperand(maybe_number, offset),
+       Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+  bind(&have_double_value);
+  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity.  If the fraction is not zero, it's NaN;
+  // otherwise it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  // Convert all NaNs to the same canonical NaN value when they are stored in
+  // the double array.
+  Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  movq(xmm_scratch, kScratchRegister);
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi.  Convert it to a double and store it, preserving the
+  // original value in maybe_number.
+  SmiToInteger32(kScratchRegister, maybe_number);
+  cvtlsi2sd(xmm_scratch, kScratchRegister);
+  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  bind(&done);
+}
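StoreNumberToDoubleElements canonicalizes every NaN on its way into a FixedDoubleArray so that the hole sentinel, itself a reserved NaN payload, can never be forged by a user value. A self-contained sketch with an assumed canonical payload:

    #include <cmath>
    #include <cstring>
    #include <stdint.h>

    static double CanonicalizeForDoubleArraySketch(double value) {
      if (!std::isnan(value)) return value;
      const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;  // Assumed.
      double canonical;
      std::memcpy(&canonical, &kCanonicalNaNBits, sizeof(canonical));
      return canonical;
    }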
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
@@ -2707,7 +2835,8 @@
 
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
-                                             Label* miss) {
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
   // Check that the receiver isn't a smi.
   testl(function, Immediate(kSmiTagMask));
   j(zero, miss);
@@ -2716,6 +2845,17 @@
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss);
 
+  if (miss_on_bound_function) {
+    movq(kScratchRegister,
+         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
+    // field).
+    TestBit(FieldOperand(kScratchRegister,
+                         SharedFunctionInfo::kCompilerHintsOffset),
+            SharedFunctionInfo::kBoundFunction);
+    j(not_zero, miss);
+  }
+
   // Make sure that the function has an instance prototype.
   Label non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
@@ -2787,10 +2927,10 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   Set(rax, 0);  // No arguments.
   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif  // ENABLE_DEBUGGER_SUPPORT
@@ -2816,6 +2956,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   InvokePrologue(expected,
                  actual,
@@ -2847,6 +2990,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   Register dummy = rax;
   InvokePrologue(expected,
@@ -2877,6 +3023,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2891,34 +3040,24 @@
 }
 
 
-void MacroAssembler::InvokeFunction(JSFunction* function,
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  ASSERT(function->is_compiled());
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Get the function and setup the context.
-  Move(rdi, Handle<JSFunction>(function));
+  Move(rdi, function);
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  if (V8::UseCrankshaft()) {
-    // Since Crankshaft can recompile a function, we need to load
-    // the Code object every time we call the function.
-    movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
-  } else {
-    // Invoke the cached code.
-    Handle<Code> code(function->code());
-    ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(code,
-               expected,
-               actual,
-               RelocInfo::CODE_TARGET,
-               flag,
-               call_wrapper,
-               call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
 }
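Calling through the code-entry field, as the new comment explains, means a recompilation that swaps that field is picked up on the very next call without patching any call sites. A structural sketch:

    // Sketch: never cache the code pointer; reload it on every invocation.
    typedef void (*CodeEntrySketch)();
    struct JSFunctionSketch { CodeEntrySketch code_entry; };

    static void InvokeSketch(JSFunctionSketch* function) {
      function->code_entry();  // Reflects recompilation immediately.
    }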
 
 
@@ -3210,42 +3349,6 @@
 }
 
 
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
-  // First of all we assign the hash seed to scratch.
-  LoadRoot(scratch, Heap::kHashSeedRootIndex);
-  SmiToInteger32(scratch, scratch);
-
-  // Xor original key with a seed.
-  xorl(r0, scratch);
-
-  // Compute the hash code from the untagged key.  This must be kept in sync
-  // with ComputeIntegerHash in utils.h.
-  //
-  // hash = ~hash + (hash << 15);
-  movl(scratch, r0);
-  notl(r0);
-  shll(scratch, Immediate(15));
-  addl(r0, scratch);
-  // hash = hash ^ (hash >> 12);
-  movl(scratch, r0);
-  shrl(scratch, Immediate(12));
-  xorl(r0, scratch);
-  // hash = hash + (hash << 2);
-  leal(r0, Operand(r0, r0, times_4, 0));
-  // hash = hash ^ (hash >> 4);
-  movl(scratch, r0);
-  shrl(scratch, Immediate(4));
-  xorl(r0, scratch);
-  // hash = hash * 2057;
-  imull(r0, r0, Immediate(2057));
-  // hash = hash ^ (hash >> 16);
-  movl(scratch, r0);
-  shrl(scratch, Immediate(16));
-  xorl(r0, scratch);
-}
-
-
-
 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                               Register elements,
                                               Register key,
@@ -3276,11 +3379,34 @@
 
   Label done;
 
-  GetNumberHash(r0, r1);
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  movl(r1, r0);
+  notl(r0);
+  shll(r1, Immediate(15));
+  addl(r0, r1);
+  // hash = hash ^ (hash >> 12);
+  movl(r1, r0);
+  shrl(r1, Immediate(12));
+  xorl(r0, r1);
+  // hash = hash + (hash << 2);
+  leal(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  movl(r1, r0);
+  shrl(r1, Immediate(4));
+  xorl(r0, r1);
+  // hash = hash * 2057;
+  imull(r0, r0, Immediate(2057));
+  // hash = hash ^ (hash >> 16);
+  movl(r1, r0);
+  shrl(r1, Immediate(16));
+  xorl(r0, r1);
 
   // Compute capacity mask.
-  SmiToInteger32(r1, FieldOperand(elements,
-                                  SeededNumberDictionary::kCapacityOffset));
+  SmiToInteger32(r1,
+                 FieldOperand(elements, NumberDictionary::kCapacityOffset));
   decl(r1);
 
   // Generate an unrolled loop that performs a few probes before giving up.
@@ -3290,19 +3416,19 @@
     movq(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+      addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
     }
     and_(r2, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    ASSERT(NumberDictionary::kEntrySize == 3);
     lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
 
     // Check if the key matches.
     cmpq(key, FieldOperand(elements,
                            r2,
                            times_pointer_size,
-                           SeededNumberDictionary::kElementsStartOffset));
+                           NumberDictionary::kElementsStartOffset));
     if (i != (kProbes - 1)) {
       j(equal, &done);
     } else {
@@ -3313,7 +3439,7 @@
   bind(&done);
   // Check that the value is a normal property.
   const int kDetailsOffset =
-      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   ASSERT_EQ(NORMAL, 0);
   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Smi::FromInt(PropertyDetails::TypeField::kMask));
@@ -3321,7 +3447,7 @@
 
   // Get the value at the masked, scaled index.
   const int kValueOffset =
-      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+      NumberDictionary::kElementsStartOffset + kPointerSize;
   movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
 }
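The probe loop above depends on the inlined hash matching ComputeIntegerHash in utils.h exactly; the emitted sequence, written as plain C++:

    #include <stdint.h>

    static uint32_t ComputeIntegerHashSketch(uint32_t hash) {
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }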
 
@@ -3772,6 +3898,20 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  movq(Operand(start_offset, 0), filler);
+  addq(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmpq(start_offset, end_offset);
+  j(less, &loop);
+}
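InitializeFieldsWithFiller jumps straight to the bounds check, so an empty [start_offset, end_offset) range writes nothing. The C-level equivalent:

    #include <stdint.h>

    static void FillFieldsSketch(uintptr_t* start, uintptr_t* end,
                                 uintptr_t filler) {
      for (uintptr_t* p = start; p < end; ++p) {
        *p = filler;  // One pointer-sized store per field.
      }
    }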
+
+
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
@@ -3871,6 +4011,7 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -3885,6 +4026,17 @@
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -3905,6 +4057,195 @@
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    movq(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+          Immediate(static_cast<uint8_t>(mask)));
+  } else {
+    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register bitmap_scratch,
+                                 Register mask_scratch,
+                                 Label* on_black,
+                                 Label::Distance on_black_distance) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  // The mask_scratch register contains a 1 at the position of the first bit
+  // and a 0 at all other positions, including the position of the second bit.
+  movq(rcx, mask_scratch);
+  // Make rcx into a mask that covers both marking bits using the operation
+  // rcx = mask | (mask << 1).
+  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+  // Note that we are using a 4-byte aligned 8-byte load.
+  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  cmpq(mask_scratch, rcx);
+  j(equal, on_black, on_black_distance);
+}
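With the colors packed as white = 00, grey = 11, and black = 10 (per the bit-pattern asserts), blackness is exactly "first bit set, second bit clear", which the masked compare above tests in one step. In plain C++:

    #include <stdint.h>

    static bool IsBlackSketch(uint32_t bitmap_cell, uint32_t first_bit_mask) {
      uint32_t both_bits = first_bit_mask | (first_bit_mask << 1);
      return (bitmap_cell & both_bits) == first_bit_mask;
    }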
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier, which doesn't care about oddballs (they are
+// always marked black immediately, so this code path is never hit for them).
+void MacroAssembler::JumpIfDataObject(
+    Register value,
+    Register scratch,
+    Label* not_data_object,
+    Label::Distance not_data_object_distance) {
+  Label is_data_object;
+  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  j(equal, &is_data_object, Label::kNear);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+        Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, not_data_object, not_data_object_distance);
+  bind(&is_data_object);
+}
+
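+// Illustrative sketch (not emitted code): the testb in JumpIfDataObject
+// evaluates
+//   instance_type & (kIsIndirectStringMask | kIsNotStringMask)
+// Non-strings have the 0x80 bit set and cons/sliced strings have the 0x01
+// bit set, so the result is zero only for direct (sequential or external)
+// strings, which hold no GC pointers.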
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+  movq(bitmap_reg, addr_reg);
+  // Sign extended 32 bit immediate.
+  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  movq(rcx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shrl(rcx, Immediate(shift));
+  and_(rcx,
+       Immediate((Page::kPageAlignmentMask >> shift) &
+                 ~(Bitmap::kBytesPerCell - 1)));
+
+  addq(bitmap_reg, rcx);
+  movq(rcx, addr_reg);
+  shrl(rcx, Immediate(kPointerSizeLog2));
+  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+  movl(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
+
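+// Illustrative sketch (not emitted code): for an object address addr,
+// GetMarkBits above computes
+//   page       = addr & ~Page::kPageAlignmentMask;
+//   cell_index = (addr & Page::kPageAlignmentMask) >>
+//                (kPointerSizeLog2 + Bitmap::kBitsPerCellLog2);
+//   bitmap_reg = page + cell_index * Bitmap::kBytesPerCell;
+//   mask_reg   = 1 << ((addr >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1));
+// The cell itself is then read at offset MemoryChunk::kHeaderSize from
+// bitmap_reg, giving one mark bit per pointer-sized word on the page.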
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // Left shift by one via addition.  May overflow, making the check
+    // conservative.
+    addq(mask_scratch, mask_scratch);
+    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = rcx;  // Holds map while checking type.
+  Register length = rcx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number
+  movq(map, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  j(not_equal, &not_heap_number, Label::kNear);
+  movq(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = rcx;
+  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  testb(instance_type, Immediate(kExternalStringTag));
+  j(zero, &not_external, Label::kNear);
+  movq(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  addq(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+  bind(&done);
+}
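+
+// Illustrative sketch (not emitted code): for a sequential string the size
+// computed at the end of EnsureNotWhite is
+//   char_size = is_ascii ? 1 : 2;
+//   size = (SeqString::kHeaderSize + char_size * length +
+//           kObjectAlignmentMask) & ~kObjectAlignmentMask;
+// The encoding bit (kAsciiStringTag == 0x04) is first turned into
+// char_size << 2, multiplied by the smi-tagged length, and then shifted
+// back down by 2 + kSmiTagSize + kSmiShiftSize.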
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index ff6edc5..cf03e59 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -49,18 +50,23 @@
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
-static const Register kScratchRegister = { 10 };      // r10.
-static const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
-static const Register kRootRegister = { 13 };         // r13 (callee save).
+const Register kScratchRegister = { 10 };      // r10.
+const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
+const Register kRootRegister = { 13 };         // r13 (callee save).
 // Value of smi in kSmiConstantRegister.
-static const int kSmiConstantRegisterValue = 1;
+const int kSmiConstantRegisterValue = 1;
 // Actual value of root register is offset from the root array's start
 // to take advantage of negative 8-bit displacement values.
-static const int kRootRegisterBias = 128;
+const int kRootRegisterBias = 128;
 
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
 // Forward declaration.
 class JumpTarget;
 
@@ -72,6 +78,7 @@
   ScaleFactor scale;
 };
 
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -134,56 +141,145 @@
   void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
 
-  // ---------------------------------------------------------------------------
-  // GC Support
+  // These functions do not arrange the registers in any particular order so
+  // they are not useful for calls that can cause a GC.  The caller can
+  // exclude up to 3 registers that do not need to be saved and restored.
+  void PushCallerSaved(SaveFPRegsMode fp_mode,
+                       Register exclusion1 = no_reg,
+                       Register exclusion2 = no_reg,
+                       Register exclusion3 = no_reg);
+  void PopCallerSaved(SaveFPRegsMode fp_mode,
+                      Register exclusion1 = no_reg,
+                      Register exclusion2 = no_reg,
+                      Register exclusion3 = no_reg);
 
-  // For page containing |object| mark region covering |addr| dirty.
-  // RecordWriteHelper only works if the object is not in new
-  // space.
-  void RecordWriteHelper(Register object,
-                         Register addr,
-                         Register scratch);
+// ---------------------------------------------------------------------------
+// GC Support
 
-  // Check if object is in new space. The condition cc can be equal or
-  // not_equal. If it is equal a jump will be done if the object is on new
-  // space. The register scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,
-                  Label* branch,
-                  Label::Distance near_jump = Label::kFar);
 
-  // For page containing |object| mark region covering [object+offset]
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met,
+                     Label::Distance condition_met_distance = Label::kFar);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch,
+                           Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, not_equal, branch, distance);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch,
+                        Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, equal, branch, distance);
+  }
+
+  // Check if an object has the black incremental marking color.  Also uses rcx!
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black,
+                   Label::Distance on_black_distance = Label::kFar);
+
+  // Detects conservatively whether an object is data-only, i.e., it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object,
+                        Label::Distance not_data_object_distance);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Label* object_is_white_and_not_data,
+                      Label::Distance distance);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
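+  // A typical (illustrative) use, after storing a pointer into a field:
+  //   movq(FieldOperand(object, offset), value);
+  //   RecordWriteField(object, offset, value, scratch, kDontSaveFPRegs);
+  // Remember that value and scratch are clobbered afterwards.
+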
+  // As above, but the offset has the tag presubtracted.  For use with
+  // Operand(reg, off).
+  void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // Notify the garbage collector that we wrote a pointer into a fixed array.
+  // |array| is the array being stored into, |value| is the
+  // object being stored.  |index| is the array index represented as a non-smi.
+  // All registers are clobbered by the operation.  RecordWriteArray
+  // filters out smis so it does not update the write barrier if the
+  // value is a smi.
+  void RecordWriteArray(
+      Register array,
+      Register value,
+      Register index,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. If |offset| is zero, then the |scratch|
-  // register contains the array index into the elements array
-  // represented as an untagged 32-bit integer. All registers are
-  // clobbered by the operation. RecordWrite filters out smis so it
-  // does not update the write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   int offset,
-                   Register value,
-                   Register scratch);
-
-  // For page containing |object| mark region covering [address]
-  // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. All registers are clobbered by the
+  // object being stored. The address and value registers are clobbered by the
   // operation.  RecordWrite filters out smis so it does not update
   // the write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register value);
-
-  // For page containing |object| mark region covering [object+offset] dirty.
-  // The value is known to not be a smi.
-  // object is the object being stored into, value is the object being stored.
-  // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as an untagged 32-bit integer.
-  // All registers are clobbered by the operation.
-  void RecordWriteNonSmi(Register object,
-                         int offset,
-                         Register value,
-                         Register scratch);
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -192,15 +288,6 @@
   void DebugBreak();
 #endif
 
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter specific kind of exit frame; either in normal or
   // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
@@ -232,9 +319,9 @@
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   void InitializeRootRegister() {
-    ExternalReference roots_address =
-        ExternalReference::roots_address(isolate());
-    movq(kRootRegister, roots_address);
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    movq(kRootRegister, roots_array_start);
     addq(kRootRegister, Immediate(kRootRegisterBias));
   }
 
@@ -270,7 +357,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(JSFunction* function,
+  void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -639,6 +726,7 @@
   void Push(Smi* smi);
   void Test(const Operand& dst, Smi* source);
 
+
   // ---------------------------------------------------------------------------
   // String macros.
 
@@ -684,6 +772,9 @@
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
+  // Bit-field support.
+  void TestBit(const Operand& dst, int bit_index);
+
   // Handle support
   void Move(Register dst, Handle<Object> source);
   void Move(const Operand& dst, Handle<Object> source);
@@ -760,6 +851,28 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Label* fail,
+                               Label::Distance distance = Label::kFar);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements.  If it can, store it at the index specified by |index|
+  // in the FastDoubleElements array; otherwise jump to fail.  Note that
+  // |index| must not be smi-tagged.
+  void StoreNumberToDoubleElements(Register maybe_number,
+                                   Register elements,
+                                   Register index,
+                                   XMMRegister xmm_scratch,
+                                   Label* fail);
+
   // Check if the map of an object is equal to a specified map and
   // branch to label if not. Skip the smi check if not required
   // (object is known to be a heap object)
@@ -820,9 +933,10 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link into try handler chain.  The return
-  // address must be pushed before calling this helper.
-  void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Push a new try handler and link it into try handler chain.
+  void PushTryHandler(CodeLocation try_location,
+                      HandlerType type,
+                      int handler_index);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   void PopTryHandler();
@@ -845,7 +959,6 @@
                               Register scratch,
                               Label* miss);
 
-  void GetNumberHash(Register r0, Register scratch);
 
   void LoadFromNumberDictionary(Label* miss,
                                 Register elements,
@@ -966,7 +1079,8 @@
   // clobbered.
   void TryGetFunctionPrototype(Register function,
                                Register result,
-                               Label* miss);
+                               Label* miss,
+                               bool miss_on_bound_function = false);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -994,19 +1108,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
-  // Call a code stub and return the code object called.  Try to generate
-  // the code if necessary.  Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
-
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
-  // Tail call a code stub (jump) and return the code object called.  Try to
-  // generate the code if necessary.  Do not perform a GC but instead return
-  // a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
-
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -1016,19 +1120,9 @@
   // Call a runtime function and save the value of XMM registers.
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
-  // Call a runtime function, returning the CodeStub object called.
-  // Try to generate the stub code if necessary.  Do not perform a GC
-  // but instead return a retry after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
-                                              int num_arguments);
-
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
-  // Convenience function: Same as above, but takes the fid instead.
-  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
-                                              int num_arguments);
-
   // Convenience function: call an external reference.
   void CallExternalReference(const ExternalReference& ext,
                              int num_arguments);
@@ -1040,38 +1134,26 @@
                                  int num_arguments,
                                  int result_size);
 
-  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
-      const ExternalReference& ext, int num_arguments, int result_size);
-
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  MUST_USE_RESULT  MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
-                                                   int num_arguments,
-                                                   int result_size);
-
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext, int result_size);
 
-  // Jump to a runtime routine.
-  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
-                                          int result_size);
-
-  // Prepares stack to put arguments (aligns and so on).
-  // WIN64 calling convention requires to put the pointer to the return value
-  // slot into rcx (rcx must be preserverd until TryCallApiFunctionAndReturn).
-  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
+  // Prepares stack to put arguments (aligns and so on).  The WIN64 calling
+  // convention requires putting the pointer to the return value slot into
+  // rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
+  // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
   // inside the exit frame (not GCed) accessible via StackSpaceOperand.
   void PrepareCallApiFunction(int arg_stack_space);
 
-  // Calls an API function. Allocates HandleScope, extracts
-  // returned value from handle and propagates exceptions.
-  // Clobbers r14, r15, rbx and caller-save registers. Restores context.
-  // On return removes stack_space * kPointerSize (GCed).
-  MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
-      ApiFunction* function, int stack_space);
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Clobbers r14, r15, rbx and
+  // caller-save registers.  Restores context.  On return removes
+  // stack_space * kPointerSize (GCed).
+  void CallApiFunctionAndReturn(Address function_address, int stack_space);
 
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -1120,6 +1202,13 @@
                  int min_length = 0,
                  Register scratch = kScratchRegister);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
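+  // Illustrative use, filling the fields of a freshly allocated object
+  // with the undefined value (register choice is arbitrary):
+  //   LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+  //   InitializeFieldsWithFiller(start_offset, end_offset, rdx);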
 
   // ---------------------------------------------------------------------------
   // StatsCounter support
@@ -1152,11 +1241,18 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   static int SafepointRegisterStackIndex(Register reg) {
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1166,6 +1262,7 @@
 
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   bool root_array_available_;
 
   // Returns a register holding the smi value. The register MUST NOT be
@@ -1189,10 +1286,6 @@
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void EnterExitFramePrologue(bool save_rax);
 
   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1219,6 +1312,24 @@
                                Register scratch,
                                bool gc_allowed);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* branch,
+                  Label::Distance distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register selects the position of the first mark bit.  Uses rcx as
+  // scratch and leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
@@ -1256,32 +1367,32 @@
 // Static helper functions.
 
 // Generate an Operand for loading a field from an object.
-static inline Operand FieldOperand(Register object, int offset) {
+inline Operand FieldOperand(Register object, int offset) {
   return Operand(object, offset - kHeapObjectTag);
 }
 
 
 // Generate an Operand for loading an indexed field from an object.
-static inline Operand FieldOperand(Register object,
-                                   Register index,
-                                   ScaleFactor scale,
-                                   int offset) {
+inline Operand FieldOperand(Register object,
+                            Register index,
+                            ScaleFactor scale,
+                            int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
 
-static inline Operand ContextOperand(Register context, int index) {
+inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
 
-static inline Operand GlobalObjectOperand() {
+inline Operand GlobalObjectOperand() {
   return ContextOperand(rsi, Context::GLOBAL_INDEX);
 }
 
 
 // Provides access to exit frame stack space (not GCed).
-static inline Operand StackSpaceOperand(int index) {
+inline Operand StackSpaceOperand(int index) {
 #ifdef _WIN64
   const int kShadowSpace = 4;
   return Operand(rsp, (index + kShadowSpace) * kPointerSize);
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a782bd7..1e0cd6a 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@
 void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
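+  // kStartIndex is an int-sized frame slot; compare all 32 bits of it.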
   BranchOrBacktrack(not_equal, &not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@
 
 void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -431,9 +431,14 @@
     // Isolate.
     __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
-    ExternalReference compare =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
-    __ CallCFunction(compare, num_arguments);
+
+    { // NOLINT: Can't find a way to open this scope without confusing the
+      // linter.
+      AllowExternalCallThatCantCauseGC scope(&masm_);
+      ExternalReference compare =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+      __ CallCFunction(compare, num_arguments);
+    }
 
     // Restore original values before reacting on result value.
     __ Move(code_object_pointer(), masm_.CodeObject());
@@ -706,7 +711,12 @@
   // registers we need.
   // Entry code:
   __ bind(&entry_label_);
-  // Start new stack frame.
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL,
+  // no code is generated.
+  FrameScope scope(&masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   __ push(rbp);
   __ movq(rbp, rsp);
   // Save parameters and callee-save registers. Order here should correspond
@@ -1238,6 +1248,11 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC.  That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 76d2555..58994f2 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -82,13 +82,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
-    MacroAssembler* masm,
-    Label* miss_label,
-    Register receiver,
-    String* name,
-    Register r0,
-    Register r1) {
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             Handle<String> name,
+                                             Register r0,
+                                             Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
@@ -118,19 +117,14 @@
   __ j(not_equal, miss_label);
 
   Label done;
-  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
-      masm,
-      miss_label,
-      &done,
-      properties,
-      name,
-      r1);
-  if (result->IsFailure()) return result;
-
+  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     properties,
+                                                     name,
+                                                     r1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
-
-  return result;
 }
 
 
@@ -211,7 +205,10 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+    MacroAssembler* masm,
+    int index,
+    Register prototype,
+    Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ Move(prototype, isolate->global());
@@ -219,8 +216,8 @@
           prototype);
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  JSFunction* function =
-      JSFunction::cast(isolate->global_context()->get(index));
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->global_context()->get(index)));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -312,8 +309,10 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst, Register src,
-                                            JSObject* holder, int index) {
+                                            Register dst,
+                                            Register src,
+                                            Handle<JSObject> holder,
+                                            int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -333,11 +332,11 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     JSObject* holder_obj) {
+                                     Handle<JSObject> holder_obj) {
   __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
-  __ Move(kScratchRegister, Handle<Object>(interceptor));
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  __ Move(kScratchRegister, interceptor);
   __ push(kScratchRegister);
   __ push(receiver);
   __ push(holder);
@@ -345,11 +344,12 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
-                                                   Register receiver,
-                                                   Register holder,
-                                                   Register name,
-                                                   JSObject* holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm,
+    Register receiver,
+    Register holder,
+    Register name,
+    Handle<JSObject> holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -403,9 +403,9 @@
 
 
 // Generates call to API function.
-static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
-                                        const CallOptimization& optimization,
-                                        int argc) {
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
   // ----------- S t a t e -------------
   //  -- rsp[0]              : return address
   //  -- rsp[8]              : object passing the type check
@@ -420,29 +420,25 @@
   //  -- rsp[(argc + 4) * 8] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  JSFunction* function = optimization.constant_function();
-  __ Move(rdi, Handle<JSFunction>(function));
+  Handle<JSFunction> function = optimization.constant_function();
+  __ Move(rdi, function);
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ movq(Operand(rsp, 2 * kPointerSize), rdi);
-  Object* call_data = optimization.api_call_info()->data();
-  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
-  if (masm->isolate()->heap()->InNewSpace(call_data)) {
-    __ Move(rcx, api_call_info_handle);
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data(api_call_info->data());
+  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
+    __ Move(rcx, api_call_info);
     __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
     __ movq(Operand(rsp, 3 * kPointerSize), rbx);
   } else {
-    __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
+    __ Move(Operand(rsp, 3 * kPointerSize), call_data);
   }
 
   // Prepare arguments.
   __ lea(rbx, Operand(rsp, 3 * kPointerSize));
 
-  Object* callback = optimization.api_call_info()->callback();
-  Address api_function_address = v8::ToCData<Address>(callback);
-  ApiFunction fun(api_function_address);
-
 #ifdef _WIN64
   // Win64 uses first register--rcx--for returned value.
   Register arguments_arg = rdx;
@@ -465,12 +461,11 @@
 
   // v8::InvocationCallback's argument.
   __ lea(arguments_arg, StackSpaceOperand(0));
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm->TryCallApiFunctionAndReturn(&fun,
-                                           argc + kFastApiCallArguments + 1);
+
+  // Function address is a foreign pointer outside V8's heap.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ CallApiFunctionAndReturn(function_address,
+                              argc + kFastApiCallArguments + 1);
 }
 
 
@@ -485,16 +480,16 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  MaybeObject* Compile(MacroAssembler* masm,
-                       JSObject* object,
-                       JSObject* holder,
-                       String* name,
-                       LookupResult* lookup,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       Label* miss) {
+  void Compile(MacroAssembler* masm,
+               Handle<JSObject> object,
+               Handle<JSObject> holder,
+               Handle<String> name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Register scratch3,
+               Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -502,45 +497,27 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
-
     if (optimization.is_constant_call()) {
-      return CompileCacheable(masm,
-                              object,
-                              receiver,
-                              scratch1,
-                              scratch2,
-                              scratch3,
-                              holder,
-                              lookup,
-                              name,
-                              optimization,
-                              miss);
+      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
+                       holder, lookup, name, optimization, miss);
     } else {
-      CompileRegular(masm,
-                     object,
-                     receiver,
-                     scratch1,
-                     scratch2,
-                     scratch3,
-                     name,
-                     holder,
-                     miss);
-      return masm->isolate()->heap()->undefined_value();  // Success.
+      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
+                     name, holder, miss);
     }
   }
 
  private:
-  MaybeObject* CompileCacheable(MacroAssembler* masm,
-                                JSObject* object,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                JSObject* interceptor_holder,
-                                LookupResult* lookup,
-                                String* name,
-                                const CallOptimization& optimization,
-                                Label* miss_label) {
+  void CompileCacheable(MacroAssembler* masm,
+                        Handle<JSObject> object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        Register scratch3,
+                        Handle<JSObject> interceptor_holder,
+                        LookupResult* lookup,
+                        Handle<String> name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -549,16 +526,14 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 =
-          optimization.GetPrototypeDepthOfExpectedType(object,
-                                                       interceptor_holder);
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(
+          object, interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 =
-            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
-                                                         lookup->holder());
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            interceptor_holder, Handle<JSObject>(lookup->holder()));
       }
-      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
-                             (depth2 != kInvalidProtoDepth);
+      can_do_fast_api_call =
+          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -574,9 +549,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver,
-                                        interceptor_holder, scratch1,
-                                        scratch2, scratch3, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3,
+                                        name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -589,10 +564,11 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      lookup->holder(), scratch1,
-                                      scratch2, scratch3, name, depth2, miss);
+                                      Handle<JSObject>(lookup->holder()),
+                                      scratch1, scratch2, scratch3,
+                                      name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -603,10 +579,7 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      MaybeObject* result = GenerateFastApiCall(masm,
-                                                optimization,
-                                                arguments_.immediate());
-      if (result->IsFailure()) return result;
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -627,33 +600,27 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
-
-    return masm->isolate()->heap()->undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      JSObject* object,
+                      Handle<JSObject> object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      String* name,
-                      JSObject* interceptor_holder,
+                      Handle<String> name,
+                      Handle<JSObject> interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3, name,
-                                        miss_label);
+                                        scratch1, scratch2, scratch3,
+                                        name, miss_label);
 
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             interceptor_holder);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -662,27 +629,30 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           JSObject* holder_obj,
+                           Handle<JSObject> holder_obj,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(holder);  // Save the holder.
+      __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+      // Leave the internal frame.
+    }
 
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
     __ j(not_equal, interceptor_succeeded);
@@ -697,32 +667,26 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Code* code = NULL;
-  if (kind == Code::LOAD_IC) {
-    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
-  } else {
-    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
-  }
-
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code = (kind == Code::LOAD_IC)
+      ? masm->isolate()->builtins()->LoadIC_Miss()
+      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Code* code = masm->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_MissForceGeneric);
-  Handle<Code> ic(code);
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> code =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      JSObject* object,
+                                      Handle<JSObject> object,
                                       int index,
-                                      Map* transition,
+                                      Handle<Map> transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
@@ -745,12 +709,12 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ pop(scratch);  // Return address.
     __ push(receiver_reg);
-    __ Push(Handle<Map>(transition));
+    __ Push(transition);
     __ push(rax);
     __ push(scratch);
     __ TailCallExternalReference(
@@ -761,11 +725,10 @@
     return;
   }
 
-  if (transition != NULL) {
+  if (!transition.is_null()) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-            Handle<Map>(transition));
+    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
   }
 
   // Adjust for the number of properties stored in the object. Even in the
@@ -781,7 +744,8 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+    __ RecordWriteField(
+        receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -792,7 +756,8 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+    __ RecordWriteField(
+        scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
   }
 
   // Return the value (register rax).
@@ -803,37 +768,53 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
-    MacroAssembler* masm,
-    GlobalObject* global,
-    String* name,
-    Register scratch,
-    Label* miss) {
-  Object* probe;
-  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
-    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
-  }
-  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                      Handle<GlobalObject> global,
+                                      Handle<String> name,
+                                      Register scratch,
+                                      Label* miss) {
+  Handle<JSGlobalPropertyCell> cell =
+      GlobalObject::EnsurePropertyCell(global, name);
   ASSERT(cell->value()->IsTheHole());
-  __ Move(scratch, Handle<Object>(cell));
+  __ Move(scratch, cell);
   __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
          masm->isolate()->factory()->the_hole_value());
   __ j(not_equal, miss);
-  return cell;
 }
 
 
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static void GenerateCheckPropertyCells(MacroAssembler* masm,
+                                       Handle<JSObject> object,
+                                       Handle<JSObject> holder,
+                                       Handle<String> name,
+                                       Register scratch,
+                                       Label* miss) {
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    if (current->IsGlobalObject()) {
+      GenerateCheckPropertyCell(masm,
+                                Handle<GlobalObject>::cast(current),
+                                name,
+                                scratch,
+                                miss);
+    }
+    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+  }
+}
+
 #undef __
 #define __ ACCESS_MASM((masm()))
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
+Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
-                                       JSObject* holder,
+                                       Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       String* name,
+                                       Handle<String> name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -853,80 +834,58 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  JSObject* current = object;
-  while (current != holder) {
-    depth++;
+  Handle<JSObject> current = object;
+  while (!current.is_identical_to(holder)) {
+    ++depth;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* lookup_result = heap()->LookupSymbol(name);
-        if (lookup_result->IsFailure()) {
-          set_failure(Failure::cast(lookup_result));
-          return reg;
-        } else {
-          name = String::cast(lookup_result->ToObjectUnchecked());
-        }
+        name = factory()->LookupSymbol(name);
       }
-      ASSERT(current->property_dictionary()->FindEntry(name) ==
+      ASSERT(current->property_dictionary()->FindEntry(*name) ==
              StringDictionary::kNotFound);
 
-      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
-                                                                      miss,
-                                                                      reg,
-                                                                      name,
-                                                                      scratch1,
-                                                                      scratch2);
-      if (negative_lookup->IsFailure()) {
-        set_failure(Failure::cast(negative_lookup));
-        return reg;
-      }
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+                                       scratch1, scratch2);
 
       __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // from now the object is in holder_reg
+      reg = holder_reg;  // From now on the object will be in holder_reg.
       __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else if (heap()->InNewSpace(prototype)) {
-      // Get the map of the current object.
-      __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ Cmp(scratch1, Handle<Map>(current->map()));
-      // Branch on the result of the map check.
-      __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
-        __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-
     } else {
-      // Check the map of the current object.
-      __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-          Handle<Map>(current->map()));
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      Handle<Map> current_map(current->map());
+      if (in_new_space) {
+        // Save the map in scratch1 for later.
+        __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+        __ Cmp(scratch1, current_map);
+      } else {
+        __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), current_map);
+      }
       // Branch on the result of the map check.
       __ j(not_equal, miss);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
       }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      __ Move(reg, Handle<JSObject>(prototype));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (in_new_space) {
+        // The prototype is in new space; we cannot store a reference to it
+        // in the code.  Load it from the map.
+        __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        // The prototype is in old space; load it directly.
+        __ Move(reg, prototype);
+      }
     }
 
     if (save_at_depth == depth) {
@@ -936,62 +895,46 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }
+  ASSERT(current.is_identical_to(holder));
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
   // Check the holder map.
   __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
   __ j(not_equal, miss);
 
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  // Perform security check for access to the global object and return
-  // the holder register.
-  ASSERT(current == holder);
+  // Perform security check for access to the global object.
   ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
   if (current->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
   }
 
-  // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.  We also need to check that the
-  // property cell for the property is still empty.
-  current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                    GlobalObject::cast(current),
-                                                    name,
-                                                    scratch1,
-                                                    miss);
-      if (cell->IsFailure()) {
-        set_failure(Failure::cast(cell));
-        return reg;
-      }
-    }
-    current = JSObject::cast(current->GetPrototype());
-  }
+  // If we've skipped any global objects, it's not enough to verify that
+  // their maps haven't changed.  We also need to check that the property
+  // cell for the property is still empty.
+  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
 
   // Return the register containing the holder.
   return reg;
 }
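
The in_new_space split above reflects a GC constraint: the scavenger moves new-space objects, so their addresses cannot be baked into generated code, and the stub reloads such a prototype from its map instead; old-space pointers can be embedded and are fixed up through relocation information if the object ever moves. A conceptual sketch (hypothetical emitter, not the MacroAssembler API):

    enum class Space { kNew, kOld };

    // Hypothetical stand-in for the code emitter.
    struct Emitter {
      void LoadPrototypeFromMap() { /* movq reg, [map + kPrototypeOffset] */ }
      void EmbedPointer(const void* p) { (void)p; /* Move reg, <handle> */ }
    };

    void EmitPrototypeLoad(Emitter* masm, const void* prototype,
                           Space space) {
      if (space == Space::kNew) {
        masm->LoadPrototypeFromMap();   // address may change on scavenge
      } else {
        masm->EmbedPointer(prototype);  // stable; relocation handles moves
      }
    }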
 
 
-void StubCompiler::GenerateLoadField(JSObject* object,
-                                     JSObject* holder,
+void StubCompiler::GenerateLoadField(Handle<JSObject> object,
+                                     Handle<JSObject> holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     String* name,
+                                     Handle<String> name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
-  Register reg =
-      CheckPrototypes(object, receiver, holder,
-                      scratch1, scratch2, scratch3, name, miss);
+  Register reg = CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -999,25 +942,22 @@
 }
 
 
-MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
-                                                JSObject* holder,
-                                                Register receiver,
-                                                Register name_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                AccessorInfo* callback,
-                                                String* name,
-                                                Label* miss) {
+void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Handle<AccessorInfo> callback,
+                                        Handle<String> name,
+                                        Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg =
-      CheckPrototypes(object, receiver, holder, scratch1,
-                      scratch2, scratch3, name, miss);
-
-  Handle<AccessorInfo> callback_handle(callback);
+  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
+                                 scratch2, scratch3, name, miss);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch2.is(reg));
@@ -1025,11 +965,11 @@
 
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  if (heap()->InNewSpace(callback_handle->data())) {
-    __ Move(scratch1, callback_handle);
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch1, callback);
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));  // data
   } else {
-    __ Push(Handle<Object>(callback_handle->data()));
+    __ Push(Handle<Object>(callback->data()));
   }
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
@@ -1048,10 +988,6 @@
   __ movq(name_arg, rsp);
   __ push(scratch2);  // Restore return address.
 
-  // Do call through the api.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-
   // 3-element array for v8::Arguments::values_ and a handle for the name.
   const int kStackSpace = 4;
 
@@ -1068,45 +1004,42 @@
   // could be used to pass arguments.
   __ lea(accessor_info_arg, StackSpaceOperand(0));
 
-  // Emitting a stub call may try to allocate (if the code is not
-  // already generated).  Do not allow the assembler to perform a
-  // garbage collection but instead return the allocation failure
-  // object.
-  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
 }
 
 
-void StubCompiler::GenerateLoadConstant(JSObject* object,
-                                        JSObject* holder,
+void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
+                                        Handle<JSObject> holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Object* value,
-                                        String* name,
+                                        Handle<Object> value,
+                                        Handle<String> name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, receiver, holder,
-                  scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(
+      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ Move(rax, Handle<Object>(value));
+  __ Move(rax, value);
   __ ret(0);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(JSObject* object,
-                                           JSObject* interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
+                                           Handle<JSObject> interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           String* name,
+                                           Handle<String> name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1122,9 +1055,9 @@
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo() &&
-        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
-      compile_followup_inline = true;
+               lookup->GetCallbackObject()->IsAccessorInfo()) {
+      compile_followup_inline =
+          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
     }
   }
 
@@ -1139,47 +1072,49 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ push(receiver);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ push(receiver);
+      }
+      __ push(holder_reg);
+      __ push(name_reg);
+
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see the caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if the interceptor provided a value for the property.
+      // If so, return immediately.
+      Label interceptor_failed;
+      __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ j(equal, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ ret(0);
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
-    __ push(holder_reg);
-    __ push(name_reg);
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ j(equal, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ ret(0);
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
    // haven't changed, and load lookup's holder into the |holder| register.
-    if (interceptor_holder != lookup->holder()) {
+    if (*interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   lookup->holder(),
+                                   Handle<JSObject>(lookup->holder()),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1191,15 +1126,15 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), rax, holder_reg,
-                               lookup->holder(), lookup->GetFieldIndex());
+                               Handle<JSObject>(lookup->holder()),
+                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
-      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
-      ASSERT(callback != NULL);
+      Handle<AccessorInfo> callback(
+          AccessorInfo::cast(lookup->GetCallbackObject()));
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1208,7 +1143,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ Move(holder_reg, Handle<AccessorInfo>(callback));
+      __ Move(holder_reg, callback);
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1237,17 +1172,17 @@
 }
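
The EnterInternalFrame/LeaveInternalFrame pair above is replaced by a scoped FrameScope: construction emits the frame entry, destruction emits the exit for the fall-through path, and an early `ret` inside the scope emits its own exit via GenerateLeaveFrame(). A sketch of that RAII discipline with a hypothetical assembler type:

    // Hypothetical assembler; the real MacroAssembler emits machine code.
    struct AssemblerLike {
      void EnterInternalFrame() { /* push rbp; ... */ }
      void LeaveInternalFrame() { /* ...; pop rbp */ }
    };

    class FrameScopeLike {
     public:
      explicit FrameScopeLike(AssemblerLike* masm) : masm_(masm) {
        masm_->EnterInternalFrame();  // emitted once, at scope entry
      }
      // Emits frame teardown for an early-return path; the destructor
      // still emits teardown for the fall-through path.  Both are
      // correct because this balances *emitted code paths*, not
      // run-time calls.
      void GenerateLeaveFrame() { masm_->LeaveInternalFrame(); }
      ~FrameScopeLike() { masm_->LeaveInternalFrame(); }

     private:
      AssemblerLike* masm_;
    };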
 
 
-void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ Cmp(rcx, Handle<String>(name));
+    __ Cmp(rcx, name);
     __ j(not_equal, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
-                                                   JSObject* holder,
-                                                   String* name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<String> name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1260,7 +1195,7 @@
   // If the object is the holder then we know that it is a global
   // object, which can only happen for contextual calls.  In that
   // case the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(rdx, miss);
   }
 
@@ -1269,15 +1204,16 @@
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Label* miss) {
   // Get the value from the cell.
-  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+  __ Move(rdi, cell);
   __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(function)) {
+  if (heap()->InNewSpace(*function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1290,30 +1226,26 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
     __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
-    __ j(not_equal, miss);
   } else {
-    __ Cmp(rdi, Handle<JSFunction>(function));
-    __ j(not_equal, miss);
+    __ Cmp(rdi, function);
   }
+  __ j(not_equal, miss);
 }
 
 
-MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj =
+void CallStubCompiler::GenerateMissBranch() {
+  Handle<Code> code =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_ic_state_);
-  Object* obj;
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
-  return obj;
+                                               extra_state_);
+  __ Jump(code, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -1353,7 +1285,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -1361,19 +1293,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPushCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1383,10 +1315,9 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -1396,14 +1327,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(JSObject::cast(object),
-                  rdx,
-                  holder,
-                  rbx,
-                  rax,
-                  rdi,
-                  name,
-                  &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1421,7 +1346,7 @@
     __ j(not_equal, &call_builtin);
 
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier;
 
       // Get the array's length into rax and calculate new length.
       __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1435,30 +1360,40 @@
       __ cmpl(rax, rcx);
       __ j(greater, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      __ movq(rcx, Operand(rsp, argc * kPointerSize));
+      __ JumpIfNotSmi(rcx, &with_write_barrier);
+
       // Save new length.
       __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
       // Push the element.
-      __ movq(rcx, Operand(rsp, argc * kPointerSize));
       __ lea(rdx, FieldOperand(rbx,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ movq(Operand(rdx, 0), rcx);
 
-      // Check if value is a smi.
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
-
-      __ JumpIfNotSmi(rcx, &with_write_barrier);
-
-      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ InNewSpace(rbx, rcx, equal, &exit);
+      __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(rdi, &call_builtin);
 
-      __ RecordWriteHelper(rbx, rdx, rcx);
+      // Save new length.
+      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
+      // Push the element.
+      __ lea(rdx, FieldOperand(rbx,
+                               rax, times_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ movq(Operand(rdx, 0), rcx);
+
+      __ RecordWrite(rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
+
+      __ Integer32ToSmi(rax, rax);  // Return new length as smi.
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1466,6 +1401,15 @@
         __ jmp(&call_builtin);
       }
 
+      __ movq(rdi, Operand(rsp, argc * kPointerSize));
+      // Growing elements that are smi-only requires special handling
+      // in case the new element is not a smi.  For now, delegate to
+      // the builtin.
+      Label no_fast_elements_check;
+      __ JumpIfSmi(rdi, &no_fast_elements_check);
+      __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
+      __ bind(&no_fast_elements_check);
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1489,16 +1433,22 @@
 
       // We fit and could grow elements.
       __ Store(new_space_allocation_top, rcx);
-      __ movq(rcx, Operand(rsp, argc * kPointerSize));
 
       // Push the argument...
-      __ movq(Operand(rdx, 0), rcx);
+      __ movq(Operand(rdx, 0), rdi);
       // ... and fill the rest with holes.
       __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
         __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
       }
 
+      // We know the elements array is in new space so we don't need the
+      // remembered set, but we just pushed a value onto it so we may have to
+      // tell the incremental marker to rescan the object that we just grew.  We
+      // don't need to worry about the holes because they are in old space and
+      // already marked black.
+      __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
       // Restore receiver to rdx as finish sequence assumes it's here.
       __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
@@ -1510,7 +1460,6 @@
       __ Integer32ToSmi(rax, rax);
       __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1522,19 +1471,19 @@
   }
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
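
The reshuffled push fast path above now checks the element for smi-ness before choosing a store sequence, because the write-barrier requirements differ: storing a smi creates no heap pointer and needs no barrier; storing a heap object needs the full RecordWrite; and the grow path can pass OMIT_REMEMBERED_SET because the elements array is known to be in new space (the remembered set only tracks old-to-new pointers), leaving only the incremental-marking concern. A sketch of that policy (hypothetical types, not the real barrier):

    struct HeapValue { bool is_smi; };

    enum class Barrier {
      kNone,         // smi stores create no pointer to track
      kMarkingOnly,  // new-space target: OMIT_REMEMBERED_SET
      kFull          // remembered set + incremental marking
    };

    Barrier BarrierFor(const HeapValue& stored, bool target_in_new_space) {
      if (stored.is_smi) return Barrier::kNone;
      return target_in_new_space ? Barrier::kMarkingOnly : Barrier::kFull;
    }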
 
 
-MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
-                                                   JSObject* holder,
-                                                   JSGlobalPropertyCell* cell,
-                                                   JSFunction* function,
-                                                   String* name) {
+Handle<Code> CallStubCompiler::CompileArrayPopCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1544,10 +1493,9 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
 
   Label miss, return_undefined, call_builtin;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -1557,9 +1505,8 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(JSObject::cast(object), rdx,
-                  holder, rbx,
-                  rax, rdi, name, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, &miss);
 
   // Get the elements array of the object.
   __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1605,20 +1552,19 @@
       1);
 
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
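
Both array builtins above use the new bail-out convention for custom-call compilers: where the MaybeObject* versions returned heap()->undefined_value() to mean "not applicable, use the generic path", the handlified versions return an empty handle and the caller tests is_null(). A sketch of the convention (hypothetical handle type):

    // Hypothetical minimal handle, only to show the convention.
    template <typename T>
    struct HandleLike {
      T* location = nullptr;
      bool is_null() const { return location == nullptr; }
      static HandleLike null() { return HandleLike(); }
    };

    struct CodeLike {};

    HandleLike<CodeLike> TryCompileCustomCall(bool applicable,
                                              CodeLike* code) {
      if (!applicable) return HandleLike<CodeLike>::null();  // fall back
      return HandleLike<CodeLike>{code};
    }

    // Caller side: a null result means "compile the regular stub instead".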
 
 
-MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1628,7 +1574,7 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
 
   const int argc = arguments().immediate();
 
@@ -1636,13 +1582,11 @@
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
-
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+      (CallICBase::StringStubState::decode(extra_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
-
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1650,12 +1594,90 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             rax,
                                             &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                  rbx, rdx, rdi, name, &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  rax, holder, rbx, rdx, rdi, name, &miss);
 
   Register receiver = rbx;
   Register index = rdi;
+  Register result = rax;
+  __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+  if (argc > 0) {
+    __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharCodeAtGenerator generator(receiver,
+                                      index,
+                                      result,
+                                      &miss,  // When not a string.
+                                      &miss,  // When not a number.
+                                      index_out_of_range_label,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
+  __ ret((argc + 1) * kPointerSize);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(rax, Heap::kNanValueRootIndex);
+    __ ret((argc + 1) * kPointerSize);
+  }
+
+  __ bind(&miss);
+  // Restore function name in rcx.
+  __ Move(rcx, name);
+  __ bind(&name_miss);
+  GenerateMissBranch();
+
+  // Return the generated code.
+  return GetCode(function);
+}
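
The index_out_of_range path above loads the NaN root because that is what String.prototype.charCodeAt returns for an out-of-range index; the charAt variant below returns the empty string instead. The semantics the fast path implements, in plain C++ (a model of the spec behavior, not V8 code):

    #include <cmath>
    #include <string>

    // Model of String.prototype.charCodeAt for an already-numeric index.
    double CharCodeAt(const std::u16string& s, double index) {
      if (index < 0 || index >= static_cast<double>(s.size())) {
        return std::nan("");  // the stub's index_out_of_range path
      }
      return s[static_cast<size_t>(index)];
    }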
+
+
+Handle<Code> CallStubCompiler::CompileStringCharAtCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
+  // ----------- S t a t e -------------
+  //  -- rcx                 : function name
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- ...
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
+
+  const int argc = arguments().immediate();
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            rax,
+                                            &miss);
+  ASSERT(!object.is_identical_to(holder));
+  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+                  rax, holder, rbx, rdx, rdi, name, &miss);
+
+  Register receiver = rax;
+  Register index = rdi;
   Register scratch = rdx;
   Register result = rax;
   __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1665,130 +1687,42 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharCodeAtGenerator char_code_at_generator(receiver,
-                                                   index,
-                                                   scratch,
-                                                   result,
-                                                   &miss,  // When not a string.
-                                                   &miss,  // When not a number.
-                                                   index_out_of_range_label,
-                                                   STRING_INDEX_IS_NUMBER);
-  char_code_at_generator.GenerateFast(masm());
+  StringCharAtGenerator generator(receiver,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &miss,  // When not a string.
+                                  &miss,  // When not a number.
+                                  index_out_of_range_label,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_code_at_generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ LoadRoot(rax, Heap::kNanValueRootIndex);
-    __ ret((argc + 1) * kPointerSize);
-  }
-
-  __ bind(&miss);
-  // Restore function name in rcx.
-  __ Move(rcx, Handle<String>(name));
-  __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-MaybeObject* CallStubCompiler::CompileStringCharAtCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
-  // ----------- S t a t e -------------
-  //  -- rcx                 : function name
-  //  -- rsp[0]              : return address
-  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
-  //  -- ...
-  //  -- rsp[(argc + 1) * 8] : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
-
-  const int argc = arguments().immediate();
-
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_ic_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            rax,
-                                            &miss);
-  ASSERT(object != holder);
-  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                  rbx, rdx, rdi, name, &miss);
-
-  Register receiver = rax;
-  Register index = rdi;
-  Register scratch1 = rbx;
-  Register scratch2 = rdx;
-  Register result = rax;
-  __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
-  if (argc > 0) {
-    __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
-  } else {
-    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
-  }
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch1,
-                                          scratch2,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          index_out_of_range_label,
-                                          STRING_INDEX_IS_NUMBER);
-  char_at_generator.GenerateFast(masm());
-  __ ret((argc + 1) * kPointerSize);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
     __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
     __ ret((argc + 1) * kPointerSize);
   }
-
   __ bind(&miss);
   // Restore function name in rcx.
-  __ Move(rcx, Handle<String>(name));
+  __ Move(rcx, name);
   __ bind(&name_miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1797,25 +1731,23 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
-  const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  const int argc = arguments().immediate();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
     __ JumpIfSmi(rdx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1830,17 +1762,17 @@
   // Convert the smi code to uint16.
   __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
 
-  StringCharFromCodeGenerator char_from_code_generator(code, rax);
-  char_from_code_generator.GenerateFast(masm());
+  StringCharFromCodeGenerator generator(code, rax);
+  generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  char_from_code_generator.GenerateSlow(masm(), call_helper);
+  generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1848,29 +1780,30 @@
 
   __ bind(&miss);
   // rcx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
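
The SmiAndConstant(code, code, Smi::FromInt(0xffff)) above implements the ToUint16 truncation that String.fromCharCode applies to its argument before the character lookup. As a one-liner (assumed spec behavior):

    #include <cstdint>

    // ToUint16 truncation applied by String.fromCharCode to a smi value.
    uint16_t ToCharCode(int32_t smi_value) {
      return static_cast<uint16_t>(smi_value & 0xffff);
    }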
 
 
-MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
-                                                    JSObject* holder,
-                                                    JSGlobalPropertyCell* cell,
-                                                    JSFunction* function,
-                                                    String* name) {
+Handle<Code> CallStubCompiler::CompileMathFloorCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // TODO(872): implement this.
-  return heap()->undefined_value();
+  return Handle<Code>::null();
 }
 
 
-MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
-                                                  JSObject* holder,
-                                                  JSGlobalPropertyCell* cell,
-                                                  JSFunction* function,
-                                                  String* name) {
+Handle<Code> CallStubCompiler::CompileMathAbsCall(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1879,28 +1812,25 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
-  const int argc = arguments().immediate();
-
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+  const int argc = arguments().immediate();
+  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell == NULL) {
+  if (cell.is_null()) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
     __ JumpIfSmi(rdx, &miss);
-
-    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
-                    &miss);
+    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                    name, &miss);
   } else {
-    ASSERT(cell->value() == function);
-    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    ASSERT(cell->value() == *function);
+    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+                                &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
-
   // Load the (only) argument into rax.
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
@@ -1957,7 +1887,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1965,33 +1895,31 @@
 
   __ bind(&miss);
   // rcx: function name.
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
-  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
 }
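
The hunk above cuts off before the emitted fast path, but a typical Math.abs smi sequence negates negative values and bails out on the most negative smi, whose absolute value is not representable as a smi and must be materialized as a heap number. A plausible sketch of that scalar logic (an assumption about the elided code, not quoted from it):

    #include <climits>
    #include <cstdint>

    // Hypothetical scalar version of the smi fast path for Math.abs.
    bool AbsSmi(int32_t x, int32_t* out) {
      if (x == INT32_MIN) return false;  // -x overflows; take slow path
      *out = x < 0 ? -x : x;
      return true;
    }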
 
 
-MaybeObject* CallStubCompiler::CompileFastApiCall(
+Handle<Code> CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Object* object,
-    JSObject* holder,
-    JSGlobalPropertyCell* cell,
-    JSFunction* function,
-    String* name) {
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to the global receiver.
-  if (object->IsGlobalObject()) return heap()->undefined_value();
-  if (cell != NULL) return heap()->undefined_value();
-  if (!object->IsJSObject()) return heap()->undefined_value();
+  if (object->IsGlobalObject()) return Handle<Code>::null();
+  if (!cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSObject()) return Handle<Code>::null();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-            JSObject::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+      Handle<JSObject>::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
 
   Label miss, miss_before_stack_reserved;
-
   GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
@@ -2010,32 +1938,30 @@
   __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(JSObject::cast(object), rdx, holder,
-                  rbx, rax, rdi, name, depth, &miss);
+  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+                  name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ movq(rax, Operand(rsp, 3 * kPointerSize));
   __ movq(Operand(rsp, 0 * kPointerSize), rax);
 
-  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
-  if (result->IsFailure()) return result;
+  GenerateFastApiCall(masm(), optimization, argc);
 
   __ bind(&miss);
   __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
+Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<JSFunction> function,
+                                                   Handle<String> name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   // rcx                 : function name
@@ -2048,16 +1974,14 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, NULL, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2074,14 +1998,13 @@
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
   Counters* counters = isolate()->counters();
-  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(counters->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rax, rdi, name, &miss);
+      CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
+                      rdi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2092,28 +2015,25 @@
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      } else {
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         // Check that the object is a two-byte string or a symbol.
         __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
-      }
-      break;
-
-    case NUMBER_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case NUMBER_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(rdx, &fast);
@@ -2123,18 +2043,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
-      }
-      break;
-    }
-
-    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      } else {
+      }
+      break;
+
+    case BOOLEAN_CHECK:
+      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
         Label fast;
         // Check that the object is a boolean.
         __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2145,17 +2065,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
-                        rbx, rdx, rdi, name, &miss);
+        CheckPrototypes(
+            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+            rax, holder, rbx, rdx, rdi, name, &miss);
+      } else {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
       }
       break;
-    }
-
-    default:
-      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2163,17 +2084,16 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(function);
 }
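
The rewritten switch flips the receiver test into positive form: a primitive receiver may be passed unboxed only to builtins and to non-classic-mode (strict or extended) functions; classic-mode user functions expect `this` to be wrapped in an object, so those stubs jump to the miss label instead of boxing. The predicate, spelled out (hypothetical helper name):

    // Whether a value (string/number/boolean) receiver can be passed to
    // the callee without boxing; otherwise the stub must miss.
    bool CanSkipReceiverBoxing(bool is_builtin, bool is_classic_mode) {
      return is_builtin || !is_classic_mode;
    }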
 
 
-MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2184,30 +2104,20 @@
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
-  MaybeObject* result = compiler.Compile(masm(),
-                                         object,
-                                         holder,
-                                         name,
-                                         &lookup,
-                                         rdx,
-                                         rbx,
-                                         rdi,
-                                         rax,
-                                         &miss);
-  if (result->IsFailure()) return result;
+  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
+  compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
+                   &miss);
 
   // Restore receiver.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2226,7 +2136,7 @@
 
   // Invoke the function.
   __ movq(rdi, rax);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -2234,19 +2144,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
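
The change above is one instance of the handlification pattern this merge
applies throughout the stub compilers: functions that returned MaybeObject*
and threaded failure checks through every call now return Handle<Code>,
with a null handle meaning "bail out to the generic path". A compilable
sketch under assumed, simplified types:

// Sketch only: V8's Handle<T> is GC-aware; this one just models null-ness.
template <typename T>
class Handle {
 public:
  Handle() : location_(nullptr) {}
  explicit Handle(T* object) : location_(object) {}
  bool is_null() const { return location_ == nullptr; }
 private:
  T* location_;
};

struct Code {};

Handle<Code> CompileCustomCallSketch(bool custom_generator_applies) {
  if (!custom_generator_applies) return Handle<Code>();  // null: bail out
  static Code code;
  return Handle<Code>(&code);  // old style returned a raw MaybeObject*
}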
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+Handle<Code> CallStubCompiler::CompileCallGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<JSFunction> function,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2258,23 +2168,17 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    MaybeObject* maybe_result = CompileCustomCall(
-        object, holder, cell, function, name);
-    Object* result;
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-    // undefined means bail out to regular compiler.
-    if (!result->IsUndefined()) return result;
+    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
   }
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
-
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2289,39 +2193,31 @@
   // Jump to the cached code (tail call).
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1);
-  ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  if (V8::UseCrankshaft()) {
-    // TODO(kasperl): For now, we always call indirectly through the
-    // code field in the function to allow recompilation to take effect
-    // without changing any of the call sites.
-    __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  } else {
-    Handle<Code> code(function->code());
-    __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
-                  NullCallWrapper(), call_kind);
-  }
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+                NullCallWrapper(), call_kind);
+
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  MaybeObject* maybe_result = GenerateMissBranch();
-  if (maybe_result->IsFailure()) return maybe_result;
+  GenerateMissBranch();
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
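
The "call indirectly through the code field" comment above is the whole
mechanism that lets recompilation take effect without patching call sites;
a compilable sketch of the indirection (types are assumptions):

// Sketch only: the stub reloads the entry point from the function object
// on every call, so replacing code_entry (e.g. after optimizing
// recompilation) is immediately visible to all existing call sites.
typedef void (*CodeEntry)();

struct JSFunctionLike {
  CodeEntry code_entry;  // what "movq rdx, [rdi + kCodeEntryOffset]" reads
};

static void CallThroughCodeField(JSFunctionLike* function) {
  function->code_entry();  // always the *current* code object
}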
 
 
-MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                   int index,
-                                                  Map* transition,
-                                                  String* name) {
+                                                  Handle<Map> transition,
+                                                  Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2331,12 +2227,7 @@
   Label miss;
 
   // Generate store field code.  Preserves receiver and name on jump to miss.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     rdx, rcx, rbx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2344,13 +2235,14 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
-                                                     AccessorInfo* callback,
-                                                     String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+    Handle<JSObject> object,
+    Handle<AccessorInfo> callback,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2378,7 +2270,7 @@
 
   __ pop(rbx);  // remove the return address
   __ push(rdx);  // receiver
-  __ Push(Handle<AccessorInfo>(callback));  // callback info
+  __ Push(callback);  // callback info
   __ push(rcx);  // name
   __ push(rax);  // value
   __ push(rbx);  // restore return address
@@ -2398,8 +2290,9 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
-                                                        String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+    Handle<JSObject> receiver,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2447,9 +2340,10 @@
 }
 
 
-MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
-                                                   JSGlobalPropertyCell* cell,
-                                                   String* name) {
+Handle<Code> StoreStubCompiler::CompileStoreGlobal(
+    Handle<GlobalObject> object,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2463,17 +2357,20 @@
          Handle<Map>(object->map()));
   __ j(not_equal, &miss);
 
+  // Compute the cell operand to use.
+  __ Move(rbx, cell);
+  Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
-  __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
-                 Heap::kTheHoleValueRootIndex);
+  __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
   __ j(equal, &miss);
 
   // Store the value in the cell.
-  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+  __ movq(cell_operand, rax);
+  // Cells are always rescanned, so no write barrier here.
 
   // Return the value (register rax).
   Counters* counters = isolate()->counters();
@@ -2491,10 +2388,10 @@
 }
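
The "no write barrier" comment in the cell store above relies on a GC
invariant; a hedged sketch of the distinction (helper names are
assumptions, not V8 API):

// Sketch only: global property cells are always rescanned by the
// collector, so a plain store suffices; an ordinary object field store
// must run the write barrier so incremental marking sees the new edge.
struct Cell { void* value; };

static void StoreToCell(Cell* cell, void* new_value) {
  cell->value = new_value;  // no barrier: cells are always rescanned
}

static void StoreToField(void** slot, void* new_value) {
  *slot = new_value;
  // ...followed by the RecordWrite barrier for heap-object values.
}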
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                        int index,
-                                                       Map* transition,
-                                                       String* name) {
+                                                       Handle<Map> transition,
+                                                       Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
@@ -2507,16 +2404,11 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rcx, Handle<String>(name));
+  __ Cmp(rcx, name);
   __ j(not_equal, &miss);
 
   // Generate store field code.  Preserves receiver and name on jump to miss.
-  GenerateStoreField(masm(),
-                     object,
-                     index,
-                     transition,
-                     rdx, rcx, rbx,
-                     &miss);
+  GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2525,39 +2417,38 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Code* stub;
+
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  MaybeObject* maybe_stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(rdx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+
+  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -2565,18 +2456,22 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(rdx, &miss);
+  __ JumpIfSmi(rdx, &miss, Label::kNear);
 
-  Register map_reg = rbx;
-  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
-  for (int current = 0; current < receiver_count; ++current) {
+  for (int i = 0; i < receiver_count; ++i) {
     // Check map and tail call if there's a match.
-    Handle<Map> map(receiver_maps->at(current));
-    __ Cmp(map_reg, map);
-    __ j(equal,
-         Handle<Code>(handler_ics->at(current)),
-         RelocInfo::CODE_TARGET);
+    __ Cmp(rdi, receiver_maps->at(i));
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -2584,13 +2479,13 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
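
The dispatch loop in CompileStorePolymorphic above is easier to follow as
pseudocode; a compilable sketch under assumed types (the real version is
generated x64 code):

#include <cstddef>
#include <vector>

struct Map {};
struct CodeStub {};

// For each candidate map: on a match, either tail call the handler
// directly, or hand the transitioned map to the handler (rbx in the
// generated code) so it can migrate the receiver before storing.
// No match falls through to the KeyedStoreIC miss stub.
static CodeStub* DispatchStore(const Map* receiver_map,
                               const std::vector<const Map*>& maps,
                               const std::vector<CodeStub*>& handlers,
                               const std::vector<Map*>& transitions,
                               Map** transition_out) {
  for (std::size_t i = 0; i < maps.size(); ++i) {
    if (receiver_map == maps[i]) {
      *transition_out = transitions[i];  // may be null: plain store
      return handlers[i];
    }
  }
  return nullptr;  // miss
}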
 
 
-MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
-                                                      JSObject* object,
-                                                      JSObject* last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
+                                                      Handle<JSObject> object,
+                                                      Handle<JSObject> last) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2609,15 +2504,8 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                  GlobalObject::cast(last),
-                                                  name,
-                                                  rdx,
-                                                  &miss);
-    if (cell->IsFailure()) {
-      miss.Unuse();
-      return cell;
-    }
+    GenerateCheckPropertyCell(
+        masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2629,14 +2517,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, heap()->empty_string());
+  return GetCode(NONEXISTENT, factory()->empty_string());
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
-                                                JSObject* holder,
+Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+                                                Handle<JSObject> holder,
                                                 int index,
-                                                String* name) {
+                                                Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2653,24 +2541,19 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
-                                                   JSObject* object,
-                                                   JSObject* holder,
-                                                   AccessorInfo* callback) {
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
-                                             rdi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
+                       name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2679,10 +2562,10 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
-                                                   JSObject* holder,
-                                                   Object* value,
-                                                   String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
+                                                   Handle<JSObject> holder,
+                                                   Handle<Object> value,
+                                                   Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2699,32 +2582,22 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                      JSObject* holder,
-                                                      String* name) {
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
+                                                      Handle<JSObject> holder,
+                                                      Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          rax,
-                          rcx,
-                          rdx,
-                          rbx,
-                          rdi,
-                          name,
-                          &miss);
-
+  GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
+                          name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2733,11 +2606,12 @@
 }
 
 
-MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 String* name,
-                                                 bool is_dont_delete) {
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+    Handle<JSObject> object,
+    Handle<GlobalObject> holder,
+    Handle<JSGlobalPropertyCell> cell,
+    Handle<String> name,
+    bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2748,7 +2622,7 @@
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual loads. In this case,
   // the receiver cannot be a smi.
-  if (object != holder) {
+  if (!object.is_identical_to(holder)) {
     __ JumpIfSmi(rax, &miss);
   }
 
@@ -2756,7 +2630,7 @@
   CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
 
   // Get the value from the cell.
-  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+  __ Move(rbx, cell);
   __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2782,9 +2656,9 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                     JSObject* receiver,
-                                                     JSObject* holder,
+Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
+                                                     Handle<JSObject> receiver,
+                                                     Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- rax     : key
@@ -2797,7 +2671,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
@@ -2811,34 +2685,27 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
-    String* name,
-    JSObject* receiver,
-    JSObject* holder,
-    AccessorInfo* callback) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
   //  -- rax     : key
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
-  MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
-                                             rcx, rdi, callback, name, &miss);
-  if (result->IsFailure()) {
-    miss.Unuse();
-    return result;
-  }
-
+  GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
+                       name, &miss);
   __ bind(&miss);
-
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2847,10 +2714,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
-                                                        JSObject* receiver,
-                                                        JSObject* holder,
-                                                        Object* value) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
+    Handle<String> name,
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<Object> value) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2862,7 +2730,7 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -2876,35 +2744,27 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
-                                                           JSObject* holder,
-                                                           String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
+    Handle<JSObject> receiver,
+    Handle<JSObject> holder,
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
-
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
-  LookupResult lookup;
+  LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver,
-                          holder,
-                          &lookup,
-                          rdx,
-                          rax,
-                          rcx,
-                          rbx,
-                          rdi,
-                          name,
-                          &miss);
+  GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
+                          name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2914,7 +2774,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2926,7 +2787,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
@@ -2939,7 +2800,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2951,7 +2813,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
@@ -2964,7 +2826,8 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
+    Handle<String> name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2976,7 +2839,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, Handle<String>(name));
+  __ Cmp(rax, name);
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
@@ -2989,32 +2852,29 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
+    Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
-  if (!maybe_stub->To(&stub)) return maybe_stub;
-  __ DispatchMap(rdx,
-                 Handle<Map>(receiver_map),
-                 Handle<Code>(stub),
-                 DO_SMI_CHECK);
+  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
+
+  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, factory()->empty_string());
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
-    MapList* receiver_maps,
-    CodeList* handler_ics) {
+Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+    MapHandleList* receiver_maps,
+    CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -3028,24 +2888,22 @@
   int receiver_count = receiver_maps->length();
   for (int current = 0; current < receiver_count; ++current) {
     // Check map and tail call if there's a match.
-    Handle<Map> map(receiver_maps->at(current));
-    __ Cmp(map_reg, map);
-    __ j(equal,
-         Handle<Code>(handler_ics->at(current)),
-         RelocInfo::CODE_TARGET);
+    __ Cmp(map_reg, receiver_maps->at(current));
+    __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
   }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
 }
 
 
 // Specialized stub for constructing objects from functions that have only
 // simple assignments of the form this.x = ...; in their body.
-MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+Handle<Code> ConstructStubCompiler::CompileConstructStub(
+    Handle<JSFunction> function) {
   // ----------- S t a t e -------------
   //  -- rax : argc
   //  -- rdi : constructor
@@ -3088,12 +2946,8 @@
   // rbx: initial map
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ shl(rcx, Immediate(kPointerSizeLog2));
-  __ AllocateInNewSpace(rcx,
-                        rdx,
-                        rcx,
-                        no_reg,
-                        &generic_stub_call,
-                        NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+                        &generic_stub_call, NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields and add the heap tag.
   // rbx: initial map
@@ -3118,7 +2972,7 @@
   // r9: first in-object property of the JSObject
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  SharedFunctionInfo* shared = function->shared();
+  Handle<SharedFunctionInfo> shared(function->shared());
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3166,10 +3020,8 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Code* code =
-      isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
-  Handle<Code> generic_construct_stub(code);
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(code, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
@@ -3436,6 +3288,7 @@
       __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3503,6 +3356,7 @@
         case EXTERNAL_FLOAT_ELEMENTS:
         case EXTERNAL_DOUBLE_ELEMENTS:
         case FAST_ELEMENTS:
+        case FAST_SMI_ONLY_ELEMENTS:
         case FAST_DOUBLE_ELEMENTS:
         case DICTIONARY_ELEMENTS:
         case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3634,15 +3488,17 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -3665,13 +3521,22 @@
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ movq(rdx, rax);
-  __ SmiToInteger32(rcx, rcx);
-  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ RecordWrite(rdi, 0, rdx, rcx);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(rax, &transition_elements_kind);
+    __ SmiToInteger32(rcx, rcx);
+    __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+            rax);
+  } else {
+    // Do the store and update the write barrier.
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ SmiToInteger32(rcx, rcx);
+    __ lea(rcx,
+           FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+    __ movq(Operand(rcx, 0), rax);
+    // Make sure to preserve the value in register rax.
+    __ movq(rdx, rax);
+    __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3681,6 +3546,10 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
 }
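
A compilable sketch (illustrative names, not V8's) of the store policy the
rewritten GenerateStoreFastElement implements: smi-only arrays skip the
write barrier because a smi is an immediate rather than a heap pointer,
object arrays record every store for the incremental marker, and a non-smi
value hitting a smi-only array bails out so the IC can transition the
elements kind.

#include <cstdint>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

static bool IsSmi(std::intptr_t value) { return (value & 1) == 0; }

static void RecordWrite(std::intptr_t* slot) { (void)slot; }  // barrier stub

// Returns false when the value forces an elements-kind transition.
static bool StoreFastElement(ElementsKind kind,
                             std::intptr_t* slot,
                             std::intptr_t value) {
  if (kind == FAST_SMI_ONLY_ELEMENTS) {
    if (!IsSmi(value)) return false;  // -> transition_elements_kind
    *slot = value;                    // smis are immediates: no barrier
  } else {                            // FAST_ELEMENTS
    *slot = value;
    RecordWrite(slot);                // pointer store: barrier required
  }
  return true;
}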
 
 
@@ -3693,8 +3562,7 @@
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic, transition_elements_kind;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -3715,50 +3583,9 @@
   __ j(above_equal, &miss_force_generic);
 
   // Handle smi and heap number values in the shared store helper.
-  __ JumpIfSmi(rax, &smi_value, Label::kNear);
-
-  __ CheckMap(rax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rax, offset),
-          Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ bind(&have_double_value);
   __ SmiToInteger32(rcx, rcx);
-  __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
-           xmm0);
-  __ ret(0);
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  // Convert all NaNs to the same canonical NaN value when they are stored in
-  // the double array.
-  __ Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  __ movq(xmm0, kScratchRegister);
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ SmiToInteger32(rdx, rax);
-  __ push(rdx);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(rdx);
-  __ SmiToInteger32(rcx, rcx);
-  __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+                                 &transition_elements_kind);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3766,6 +3593,12 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  // Restore smi-tagging of rcx.
+  __ Integer32ToSmi(rcx, rcx);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
 }
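
The inline NaN handling deleted above is assumed to be subsumed by
StoreNumberToDoubleElements; a compilable sketch of the canonicalization
it performed:

#include <cmath>
#include <cstdint>
#include <cstring>

// All NaN payloads collapse to a single canonical quiet NaN before the
// value reaches the FixedDoubleArray, so the hole sentinel (a distinct
// NaN bit pattern) can never be forged by an ordinary store.
static double CanonicalizeNaN(double value) {
  if (std::isnan(value)) {
    const std::uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;
    std::memcpy(&value, &kCanonicalNaN, sizeof(value));
  }
  return value;
}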
 
 
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 4870105..7e506e7 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -53,6 +53,14 @@
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
 
+  // If the allocation size is divisible by 8 then we return an 8-byte aligned
+  // address.
+  if (kPointerSize == 4 && kAlignment == 4) {
+    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
+  } else {
+    ASSERT(kAlignment >= kPointerSize);
+  }
+
   // Check if the requested size is available without expanding.
   Address result = position_;
 
diff --git a/src/zone.h b/src/zone.h
index f60ac0d..2ca3c4d 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -86,7 +86,9 @@
   friend class Isolate;
   friend class ZoneScope;
 
-  // All pointers returned from New() have this alignment.
+  // All pointers returned from New() have this alignment.  In addition, if the
+  // object being allocated has a size that is divisible by 8 then its alignment
+  // will be 8.
   static const int kAlignment = kPointerSize;
 
   // Never allocate segments smaller than this size in bytes.
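
The one-line bit trick added to Zone::New above is worth unpacking; a
compilable sketch with worked cases, assuming kAlignment == kPointerSize
== 4 as in the guarded branch:

#include <cassert>
#include <cstdint>

// size has already been rounded up to a multiple of 4. The bump is 4
// exactly when size is divisible by 8 ((~size) & 4 == 4) and the current
// position is 4 mod 8 (position & 4 == 4); otherwise it is 0. Net effect:
// allocations whose size is a multiple of 8 come back 8-byte aligned.
static std::uintptr_t AdjustForAlignment(std::uintptr_t position,
                                         std::uintptr_t size) {
  position += ((~size) & 4) & (position & 4);
  return position;
}

int main() {
  assert(AdjustForAlignment(0x1004, 16) == 0x1008);  // bumped to 8-aligned
  assert(AdjustForAlignment(0x1008, 16) == 0x1008);  // already aligned
  assert(AdjustForAlignment(0x1004, 12) == 0x1004);  // size % 8 != 0: no-op
  return 0;
}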
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 5c92671..621d8ec 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -73,7 +73,6 @@
     'test-fixed-dtoa.cc',
     'test-flags.cc',
     'test-func-name-inference.cc',
-    'test-hashing.cc',
     'test-hashmap.cc',
     'test-heap-profiler.cc',
     'test-heap.cc',
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 5d0cab3..3b8f4f6 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -68,6 +68,7 @@
         'test-fixed-dtoa.cc',
         'test-flags.cc',
         'test-func-name-inference.cc',
+        'test-hashing.cc',
         'test-hashmap.cc',
         'test-heap.cc',
         'test-heap-profiler.cc',
@@ -91,7 +92,8 @@
         'test-threads.cc',
         'test-unbound-queue.cc',
         'test-utils.cc',
-        'test-version.cc'
+        'test-version.cc',
+        'test-weakmaps.cc'
       ],
       'conditions': [
         ['v8_target_arch=="ia32"', {
@@ -134,6 +136,12 @@
           'sources': [
             'test-platform-win32.cc',
           ],
+          'msvs_settings': {
+            'VCCLCompilerTool': {
+              # MSVS wants this for gay-{precision,shortest}.cc.
+              'AdditionalOptions': ['/bigobj'],
+            },
+          },
         }],
         ['component=="shared_library"', {
           # cctest can't be built against a shared library, so we need to
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 5122da5..7161345 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -33,11 +33,22 @@
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# BUG(484): This test, which we thought was originally corrected in r5236,
+# is re-appearing. Disabled until the bug in the test is fixed. It only
+# fails when the snapshot is on, so it is marked PASS || FAIL.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
 # These tests always fail.  They are here to test test.py.  If
 # they don't fail then test.py has failed.
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+# TODO(gc): Temporarily disabled in the GC branch.
+test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
+
+# BUG(1261): Flaky test.
+test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
+
 # We do not yet shrink weak maps after they have been emptied by the GC
 test-weakmaps/Shrinking: FAIL
 
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index d95536d..b1900f9 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -241,7 +241,7 @@
   ApiTestFuzzer::Fuzz();
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
   return v8::Integer::New(17);
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 9767192..cf9d488 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,11 +72,29 @@
   }
   CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
 
-  // Large object space.
-  while (!heap->OldGenerationAllocationLimitReached()) {
-    CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
+  // Old pointer space.
+  OldSpace* old_pointer_space = heap->old_pointer_space();
+  static const int kOldPointerSpaceFillerLength = 10000;
+  static const int kOldPointerSpaceFillerSize = FixedArray::SizeFor(
+      kOldPointerSpaceFillerLength);
+  while (old_pointer_space->Available() > kOldPointerSpaceFillerSize) {
+    CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
+          IsFailure());
   }
-  CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
+  CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
+        IsFailure());
+
+  // Large object space.
+  static const int kLargeObjectSpaceFillerLength = 300000;
+  static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
+      kLargeObjectSpaceFillerLength);
+  ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
+  while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
+    CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
+          IsFailure());
+  }
+  CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
+        IsFailure());
 
   // Map space.
   MapSpace* map_space = heap->map_space();
@@ -175,16 +193,16 @@
 // Plain old data class.  Represents a block of allocated memory.
 class Block {
  public:
-  Block(void* base_arg, int size_arg)
+  Block(Address base_arg, int size_arg)
       : base(base_arg), size(size_arg) {}
 
-  void *base;
+  Address base;
   int size;
 };
 
 
 TEST(CodeRange) {
-  const int code_range_size = 16*MB;
+  const int code_range_size = 32*MB;
   OS::Setup();
   Isolate::Current()->InitializeLoggingAndCounters();
   CodeRange* code_range = new CodeRange(Isolate::Current());
@@ -196,11 +214,15 @@
   while (total_allocated < 5 * code_range_size) {
     if (current_allocated < code_range_size / 10) {
       // Allocate a block.
-      // Geometrically distributed sizes, greater than Page::kPageSize.
-      size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
-           Pseudorandom() % 5000 + 1;
+      // Geometrically distributed sizes, greater than
+      // Page::kMaxNonCodeHeapObjectSize (which is greater than the code page
+      // area).
+      // TODO(gc): instead of using 3, use some constant based on
+      // code_range_size and kMaxHeapObjectSize.
+      size_t requested =
+          (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
+          Pseudorandom() % 5000 + 1;
       size_t allocated = 0;
-      void* base = code_range->AllocateRawMemory(requested, &allocated);
+      Address base = code_range->AllocateRawMemory(requested, &allocated);
       CHECK(base != NULL);
       blocks.Add(Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c1c8aae..cc20b6f 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -80,6 +80,11 @@
   CHECK_EQ(expected, *ascii);
 }
 
+static void ExpectInt32(const char* code, int expected) {
+  Local<Value> result = CompileRun(code);
+  CHECK(result->IsInt32());
+  CHECK_EQ(expected, result->Int32Value());
+}
 
 static void ExpectBoolean(const char* code, bool expected) {
   Local<Value> result = CompileRun(code);
@@ -393,11 +398,11 @@
     CHECK(source->IsExternal());
     CHECK_EQ(resource,
              static_cast<TestResource*>(source->GetExternalStringResource()));
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   v8::internal::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, dispose_count);
 }
 
@@ -415,11 +420,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, dispose_count);
 }
 
@@ -441,11 +446,12 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  // TODO(1608): This should use kAbortIncrementalMarking.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   CHECK_EQ(1, dispose_count);
 }
 
@@ -467,11 +473,12 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  // TODO(1608): This should use kAbortIncrementalMarking.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   CHECK_EQ(1, dispose_count);
 }
 
@@ -484,7 +491,7 @@
   HEAP->CollectGarbage(i::NEW_SPACE);
   HEAP->CollectGarbage(i::NEW_SPACE);
 
-  uint16_t* two_byte_string = AsciiToTwoByteString("small");
+  uint16_t* two_byte_string = AsciiToTwoByteString("s1");
   Local<String> small_string = String::New(two_byte_string);
   i::DeleteArray(two_byte_string);
 
@@ -496,7 +503,7 @@
   // Old space strings should be accepted.
   CHECK(small_string->CanMakeExternal());
 
-  two_byte_string = AsciiToTwoByteString("small 2");
+  two_byte_string = AsciiToTwoByteString("small string 2");
   small_string = String::New(two_byte_string);
   i::DeleteArray(two_byte_string);
 
@@ -530,7 +537,7 @@
   HEAP->CollectGarbage(i::NEW_SPACE);
   HEAP->CollectGarbage(i::NEW_SPACE);
 
-  Local<String> small_string = String::New("small");
+  Local<String> small_string = String::New("s1");
   // We should refuse to externalize newly created small string.
   CHECK(!small_string->CanMakeExternal());
   // Trigger GCs so that the newly allocated string moves to old gen.
@@ -539,7 +546,7 @@
   // Old space strings should be accepted.
   CHECK(small_string->CanMakeExternal());
 
-  small_string = String::New("small 2");
+  small_string = String::New("small string 2");
   // We should refuse externalizing newly created small string.
   CHECK(!small_string->CanMakeExternal());
   for (int i = 0; i < 100; i++) {
@@ -572,8 +579,8 @@
     i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -590,8 +597,8 @@
     i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -672,11 +679,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllAvailableGarbage();
     CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
   CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
 
@@ -693,11 +700,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllAvailableGarbage();
     CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
 }
@@ -744,8 +751,8 @@
     CHECK_EQ(68, value->Int32Value());
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -1182,9 +1189,9 @@
   templ->Set("x", v8_num(200));
   templ->SetAccessor(v8_str("m"), GetM);
   LocalContext env(0, templ);
-  v8::Handle<v8::Object> obj = env->Global();
-  v8::Handle<Script> script = v8_compile("dummy()");
-  v8::Handle<Value> result = script->Run();
+  v8::Handle<v8::Object> obj(env->Global());
+  v8::Handle<Script> script(v8_compile("dummy()"));
+  v8::Handle<Value> result(script->Run());
   CHECK_EQ(13.4, result->NumberValue());
   CHECK_EQ(200, v8_compile("x")->Run()->Int32Value());
   CHECK_EQ(876, v8_compile("m")->Run()->Int32Value());
@@ -1294,6 +1301,197 @@
   return name;
 }
 
+// Helper functions for Interceptor/Accessor interaction tests
+
+Handle<Value> SimpleAccessorGetter(Local<String> name,
+                                   const AccessorInfo& info) {
+  Handle<Object> self = info.This();
+  return self->Get(String::Concat(v8_str("accessor_"), name));
+}
+
+void SimpleAccessorSetter(Local<String> name, Local<Value> value,
+                          const AccessorInfo& info) {
+  Handle<Object> self = info.This();
+  self->Set(String::Concat(v8_str("accessor_"), name), value);
+}
+
+Handle<Value> EmptyInterceptorGetter(Local<String> name,
+                                     const AccessorInfo& info) {
+  return Handle<Value>();
+}
+
+Handle<Value> EmptyInterceptorSetter(Local<String> name,
+                                     Local<Value> value,
+                                     const AccessorInfo& info) {
+  return Handle<Value>();
+}
+
+Handle<Value> InterceptorGetter(Local<String> name,
+                                const AccessorInfo& info) {
+  // Intercept names that start with 'interceptor_'.
+  String::AsciiValue ascii(name);
+  char* name_str = *ascii;
+  char prefix[] = "interceptor_";
+  int i;
+  for (i = 0; name_str[i] && prefix[i]; ++i) {
+    if (name_str[i] != prefix[i]) return Handle<Value>();
+  }
+  Handle<Object> self = info.This();
+  return self->GetHiddenValue(v8_str(name_str + i));
+}
+
+Handle<Value> InterceptorSetter(Local<String> name,
+                                Local<Value> value,
+                                const AccessorInfo& info) {
+  // Intercept accesses that set certain integer values.
+  if (value->IsInt32() && value->Int32Value() < 10000) {
+    Handle<Object> self = info.This();
+    self->SetHiddenValue(name, value);
+    return value;
+  }
+  return Handle<Value>();
+}
+
+void AddAccessor(Handle<FunctionTemplate> templ,
+                 Handle<String> name,
+                 v8::AccessorGetter getter,
+                 v8::AccessorSetter setter) {
+  templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+void AddInterceptor(Handle<FunctionTemplate> templ,
+                    v8::NamedPropertyGetter getter,
+                    v8::NamedPropertySetter setter) {
+  templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "child.age = 10;");
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectInt32("child.age", 10);
+  ExpectInt32("child.accessor_age", 10);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "var parent = child.__proto__;"
+             "Object.defineProperty(parent, 'age', "
+             "  {get: function(){ return this.accessor_age; }, "
+             "   set: function(v){ this.accessor_age = v; }, "
+             "   enumerable: true, configurable: true});"
+             "child.age = 10;");
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectInt32("child.age", 10);
+  ExpectInt32("child.accessor_age", 10);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "var parent = child.__proto__;"
+             "parent.name = 'Alice';");
+  ExpectBoolean("child.hasOwnProperty('name')", false);
+  ExpectString("child.name", "Alice");
+  CompileRun("child.name = 'Bob';");
+  ExpectString("child.name", "Bob");
+  ExpectBoolean("child.hasOwnProperty('name')", true);
+  ExpectString("parent.name", "Alice");
+}
+
+THREADED_TEST(SwitchFromInterceptorToAccessor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+  // The last i goes to the accessor.
+  ExpectInt32("child.accessor_age", 10000);
+}
+
+THREADED_TEST(SwitchFromAccessorToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to the accessor.
+  ExpectInt32("child.accessor_age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+}
+
+THREADED_TEST(SwitchFromInterceptorToProperty) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+  // The last i goes to child's own property.
+  ExpectInt32("child.age", 10000);
+}
+
+THREADED_TEST(SwitchFromPropertyToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to child's own property.
+  ExpectInt32("child.age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+}
 
 THREADED_TEST(NamedPropertyHandlerGetter) {
   echo_named_call_count = 0;
@@ -1567,7 +1765,7 @@
 
   env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
   call_recursively_script = v8_compile("callScriptRecursively()");
-  v8::Handle<Value> result = call_recursively_script->Run();
+  v8::Handle<Value> result(call_recursively_script->Run());
   call_recursively_script = v8::Handle<Script>();
 
   env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
@@ -1666,12 +1864,12 @@
 
   // Check reading and writing aligned pointers.
   obj->SetPointerInInternalField(0, aligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   // Check reading and writing unaligned pointers.
   obj->SetPointerInInternalField(0, unaligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1697,19 +1895,19 @@
   CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
 
   obj->SetPointerInInternalField(0, aligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
 
   obj->SetPointerInInternalField(0, unaligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
 
   obj->SetInternalField(0, v8::External::Wrap(aligned));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   obj->SetInternalField(0, v8::External::Wrap(unaligned));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1722,7 +1920,7 @@
 
   // Ensure that the test starts with a fresh heap to test whether the hash
   // code is based on the address.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   Local<v8::Object> obj = v8::Object::New();
   int hash = obj->GetIdentityHash();
   int hash1 = obj->GetIdentityHash();
@@ -1732,7 +1930,7 @@
   // objects should not be assigned the same hash code. If the test below fails
   // the random number generator should be evaluated.
   CHECK_NE(hash, hash2);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   int hash3 = v8::Object::New()->GetIdentityHash();
   // Make sure that the identity hash is not based on the initial address of
   // the object alone. If the test below fails the random number generator
@@ -1769,7 +1967,7 @@
   v8::Local<v8::String> empty = v8_str("");
   v8::Local<v8::String> prop_name = v8_str("prop_name");
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Make sure delete of a non-existent hidden value works
   CHECK(obj->DeleteHiddenValue(key));
@@ -1779,7 +1977,7 @@
   CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Make sure we do not find the hidden property.
   CHECK(!obj->Has(empty));
@@ -1790,7 +1988,7 @@
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
   CHECK_EQ(2003, obj->Get(empty)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Add another property and delete it afterwards to force the object in
   // slow case.
@@ -1801,7 +1999,7 @@
   CHECK(obj->Delete(prop_name));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   CHECK(obj->DeleteHiddenValue(key));
   CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -1908,19 +2106,30 @@
 }
 
 
-static int NumberOfWeakCalls = 0;
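+// Wraps a per-test weak-call count so it can be handed to the callback
+// through the void* parameter instead of a shared mutable global.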
+class WeakCallCounter {
+ public:
+  explicit WeakCallCounter(int id) : id_(id), number_of_weak_calls_(0) { }
+  int id() { return id_; }
+  void increment() { number_of_weak_calls_++; }
+  int NumberOfWeakCalls() { return number_of_weak_calls_; }
+ private:
+  int id_;
+  int number_of_weak_calls_;
+};
+
+
 static void WeakPointerCallback(Persistent<Value> handle, void* id) {
-  CHECK_EQ(reinterpret_cast<void*>(1234), id);
-  NumberOfWeakCalls++;
+  WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(id);
+  CHECK_EQ(1234, counter->id());
+  counter->increment();
   handle.Dispose();
 }
 
+
 THREADED_TEST(ApiObjectGroups) {
   HandleScope scope;
   LocalContext env;
 
-  NumberOfWeakCalls = 0;
-
   Persistent<Object> g1s1;
   Persistent<Object> g1s2;
   Persistent<Object> g1c1;
@@ -1928,21 +2137,23 @@
   Persistent<Object> g2s2;
   Persistent<Object> g2c1;
 
+  WeakCallCounter counter(1234);
+
   {
     HandleScope scope;
     g1s1 = Persistent<Object>::New(Object::New());
     g1s2 = Persistent<Object>::New(Object::New());
     g1c1 = Persistent<Object>::New(Object::New());
-    g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g2s1 = Persistent<Object>::New(Object::New());
     g2s2 = Persistent<Object>::New(Object::New());
     g2c1 = Persistent<Object>::New(Object::New());
-    g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   }
 
   Persistent<Object> root = Persistent<Object>::New(g1s1);  // make a root.
@@ -1961,14 +2172,15 @@
     V8::AddObjectGroup(g2_objects, 2);
     V8::AddImplicitReferences(g2s2, g2_children, 1);
   }
-  // Do a full GC
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  // Do a single full GC. Use kMakeHeapIterableMask to ensure that
+  // incremental garbage collection is stopped.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All objects should be alive.
-  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(0, counter.NumberOfWeakCalls());
 
   // Weaken the root.
-  root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   // But make children strong roots---all the objects (except for children)
   // should be collectable now.
   g1c1.ClearWeak();
@@ -1986,17 +2198,17 @@
     V8::AddImplicitReferences(g2s2, g2_children, 1);
   }
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All objects should be gone. 5 global handles in total.
-  CHECK_EQ(5, NumberOfWeakCalls);
+  CHECK_EQ(5, counter.NumberOfWeakCalls());
 
   // And now make children weak again and collect them.
-  g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-  g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+  g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
-  CHECK_EQ(7, NumberOfWeakCalls);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+  CHECK_EQ(7, counter.NumberOfWeakCalls());
 }
 
 
@@ -2004,7 +2216,7 @@
   HandleScope scope;
   LocalContext env;
 
-  NumberOfWeakCalls = 0;
+  WeakCallCounter counter(1234);
 
   Persistent<Object> g1s1;
   Persistent<Object> g1s2;
@@ -2017,18 +2229,18 @@
     HandleScope scope;
     g1s1 = Persistent<Object>::New(Object::New());
     g1s2 = Persistent<Object>::New(Object::New());
-    g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g2s1 = Persistent<Object>::New(Object::New());
     g2s2 = Persistent<Object>::New(Object::New());
-    g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g3s1 = Persistent<Object>::New(Object::New());
     g3s2 = Persistent<Object>::New(Object::New());
-    g3s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g3s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   }
 
   Persistent<Object> root = Persistent<Object>::New(g1s1);  // make a root.
@@ -2050,14 +2262,14 @@
     V8::AddObjectGroup(g3_objects, 2);
     V8::AddImplicitReferences(g3s1, g3_children, 1);
   }
-  // Do a full GC
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  // Do a single full GC.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All objects should be alive.
-  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(0, counter.NumberOfWeakCalls());
 
   // Weaken the root.
-  root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
   // Groups are deleted, rebuild groups.
   {
@@ -2075,10 +2287,10 @@
     V8::AddImplicitReferences(g3s1, g3_children, 1);
   }
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All objects should be gone. 7 global handles in total.
-  CHECK_EQ(7, NumberOfWeakCalls);
+  CHECK_EQ(7, counter.NumberOfWeakCalls());
 }
 
 
@@ -4172,7 +4384,7 @@
 
   source = v8_str("undetectable.y = 2000;");
   script = Script::Compile(source);
-  Local<Value> result = script->Run();
+  Local<Value> result(script->Run());
   ExpectBoolean("undetectable.y == undefined", true);
 }
 
@@ -4305,6 +4517,47 @@
 }
 
 
+static const char* kEmbeddedExtensionSource =
+    "function Ret54321(){return 54321;}~~@@$"
+    "$%% THIS IS A SERIES OF NON-NULL-TERMINATED STRINGS.";
+static const int kEmbeddedExtensionSourceValidLen = 34;
+
+
+THREADED_TEST(ExtensionMissingSourceLength) {
+  v8::HandleScope handle_scope;
+  v8::RegisterExtension(new Extension("srclentest_fail",
+                                      kEmbeddedExtensionSource));
+  const char* extension_names[] = { "srclentest_fail" };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context = Context::New(&extensions);
+  CHECK_EQ(0, *context);
+}
+
+
+THREADED_TEST(ExtensionWithSourceLength) {
+  for (int source_len = kEmbeddedExtensionSourceValidLen - 1;
+       source_len <= kEmbeddedExtensionSourceValidLen + 1; ++source_len) {
+    v8::HandleScope handle_scope;
+    i::ScopedVector<char> extension_name(32);
+    i::OS::SNPrintF(extension_name, "ext #%d", source_len);
+    v8::RegisterExtension(new Extension(extension_name.start(),
+                                        kEmbeddedExtensionSource, 0, 0,
+                                        source_len));
+    const char* extension_names[1] = { extension_name.start() };
+    v8::ExtensionConfiguration extensions(1, extension_names);
+    v8::Handle<Context> context = Context::New(&extensions);
+    if (source_len == kEmbeddedExtensionSourceValidLen) {
+      Context::Scope lock(context);
+      v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
+      CHECK_EQ(v8::Integer::New(54321), result);
+    } else {
+      // Anything but exactly the right length should fail to compile.
+      CHECK_EQ(0, *context);
+    }
+  }
+}
+
+
 static const char* kEvalExtensionSource1 =
   "function UseEval1() {"
   "  var x = 42;"
@@ -4483,7 +4736,7 @@
                                                     "native\nfunction foo();"));
   const char* extension_names[] = { name };
   v8::ExtensionConfiguration extensions(1, extension_names);
-  v8::Handle<Context> context = Context::New(&extensions);
+  v8::Handle<Context> context(Context::New(&extensions));
   ASSERT(context.IsEmpty());
 }
 
@@ -4497,7 +4750,7 @@
       "nativ\\u0065 function foo();"));
   const char* extension_names[] = { name };
   v8::ExtensionConfiguration extensions(1, extension_names);
-  v8::Handle<Context> context = Context::New(&extensions);
+  v8::Handle<Context> context(Context::New(&extensions));
   ASSERT(context.IsEmpty());
 }
 
@@ -4664,7 +4917,7 @@
   Local<Script> script =
       Script::Compile(String::New(js_code_causing_huge_string_flattening));
   last_location = NULL;
-  Local<Value> result = script->Run();
+  Local<Value> result(script->Run());
 
   CHECK(false);  // Should not return.
 }
@@ -4805,7 +5058,7 @@
 
 
 static void InvokeMarkSweep() {
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -4898,7 +5151,7 @@
   CHECK_EQ(v8::Integer::New(3), args[2]);
   CHECK_EQ(v8::Undefined(), args[3]);
   v8::HandleScope scope;
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   return v8::Undefined();
 }
 
@@ -5185,67 +5438,109 @@
 
 
 THREADED_TEST(StringWrite) {
+  LocalContext context;
   v8::HandleScope scope;
   v8::Handle<String> str = v8_str("abcde");
   // abc<Icelandic eth><Unicode snowman>.
   v8::Handle<String> str2 = v8_str("abc\303\260\342\230\203");
+  const int kStride = 4;  // Must match stride in for loops in JS below.
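+  // Build two deep cons strings: 'left' grows by appending (a left-leaning
+  // tree) and 'right' by prepending (a right-leaning tree).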
+  CompileRun(
+      "var left = '';"
+      "for (var i = 0; i < 0xd800; i += 4) {"
+      "  left = left + String.fromCharCode(i);"
+      "}");
+  CompileRun(
+      "var right = '';"
+      "for (var i = 0; i < 0xd800; i += 4) {"
+      "  right = String.fromCharCode(i) + right;"
+      "}");
+  v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
+  Handle<String> left_tree = global->Get(v8_str("left")).As<String>();
+  Handle<String> right_tree = global->Get(v8_str("right")).As<String>();
 
   CHECK_EQ(5, str2->Length());
+  CHECK_EQ(0xd800 / kStride, left_tree->Length());
+  CHECK_EQ(0xd800 / kStride, right_tree->Length());
 
   char buf[100];
-  char utf8buf[100];
+  char utf8buf[0xd800 * 3];
   uint16_t wbuf[100];
   int len;
   int charlen;
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
   CHECK_EQ(9, len);
   CHECK_EQ(5, charlen);
   CHECK_EQ(0, strcmp(utf8buf, "abc\303\260\342\230\203"));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 8, &charlen);
   CHECK_EQ(8, len);
   CHECK_EQ(5, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\342\230\203\1", 9));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 7, &charlen);
   CHECK_EQ(5, len);
   CHECK_EQ(4, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 6, &charlen);
   CHECK_EQ(5, len);
   CHECK_EQ(4, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 5, &charlen);
   CHECK_EQ(5, len);
   CHECK_EQ(4, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\303\260\1", 5));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 4, &charlen);
   CHECK_EQ(3, len);
   CHECK_EQ(3, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\1", 4));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 3, &charlen);
   CHECK_EQ(3, len);
   CHECK_EQ(3, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "abc\1", 4));
 
-  memset(utf8buf, 0x1, sizeof(utf8buf));
+  memset(utf8buf, 0x1, 1000);
   len = str2->WriteUtf8(utf8buf, 2, &charlen);
   CHECK_EQ(2, len);
   CHECK_EQ(2, charlen);
   CHECK_EQ(0, strncmp(utf8buf, "ab\1", 3));
 
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = left_tree->Utf8Length();
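+  // Code points below 0x80 take one UTF-8 byte, those below 0x800 two bytes,
+  // and the rest up to 0xd800 three bytes; the loops above stepped by kStride.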
+  int utf8_expected =
+      (0x80 + (0x800 - 0x80) * 2 + (0xd800 - 0x800) * 3) / kStride;
+  CHECK_EQ(utf8_expected, len);
+  len = left_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
+  CHECK_EQ(utf8_expected, len);
+  CHECK_EQ(0xd800 / kStride, charlen);
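+  // The last code point emitted is 0xd800 - kStride = 0xd7fc, which encodes
+  // as ED 9F BC in UTF-8.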
+  CHECK_EQ(0xed, static_cast<unsigned char>(utf8buf[utf8_expected - 3]));
+  CHECK_EQ(0x9f, static_cast<unsigned char>(utf8buf[utf8_expected - 2]));
+  CHECK_EQ(0xc0 - kStride,
+           static_cast<unsigned char>(utf8buf[utf8_expected - 1]));
+  CHECK_EQ(1, utf8buf[utf8_expected]);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = right_tree->Utf8Length();
+  CHECK_EQ(utf8_expected, len);
+  len = right_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
+  CHECK_EQ(utf8_expected, len);
+  CHECK_EQ(0xd800 / kStride, charlen);
+  CHECK_EQ(0xed, static_cast<unsigned char>(utf8buf[0]));
+  CHECK_EQ(0x9f, static_cast<unsigned char>(utf8buf[1]));
+  CHECK_EQ(0xc0 - kStride, static_cast<unsigned char>(utf8buf[2]));
+  CHECK_EQ(1, utf8buf[utf8_expected]);
+
   memset(buf, 0x1, sizeof(buf));
   memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf);
@@ -5400,7 +5695,7 @@
   v8::Handle<String> message = v8_str("message");
   v8::Handle<Value> range_error = v8::Exception::RangeError(foo);
   CHECK(range_error->IsObject());
-  v8::Handle<v8::Object> range_obj = range_error.As<v8::Object>();
+  v8::Handle<v8::Object> range_obj(range_error.As<v8::Object>());
   CHECK(range_error.As<v8::Object>()->Get(message)->Equals(foo));
   v8::Handle<Value> reference_error = v8::Exception::ReferenceError(foo);
   CHECK(reference_error->IsObject());
@@ -6970,7 +7265,7 @@
     // Create new environment reusing the global object.
     LocalContext env(NULL, instance_template, global_object);
     env->Global()->Set(v8_str("foo"), foo);
-    Local<Value> value = Script::Compile(v8_str("foo()"))->Run();
+    Local<Value> value(Script::Compile(v8_str("foo()"))->Run());
   }
 }
 
@@ -7158,6 +7453,60 @@
 }
 
 
+// Getting property names of an object with a prototype chain that
+// triggers dictionary elements in GetLocalPropertyNames() shouldn't
+// crash the runtime.
+THREADED_TEST(Regress91517) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope handle_scope;
+  LocalContext context;
+
+  Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New();
+  t1->SetHiddenPrototype(true);
+  t1->InstanceTemplate()->Set(v8_str("foo"), v8_num(1));
+  Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New();
+  t2->SetHiddenPrototype(true);
+  t2->InstanceTemplate()->Set(v8_str("fuz1"), v8_num(2));
+  t2->InstanceTemplate()->Set(v8_str("objects"), v8::Object::New());
+  t2->InstanceTemplate()->Set(v8_str("fuz2"), v8_num(2));
+  Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New();
+  t3->SetHiddenPrototype(true);
+  t3->InstanceTemplate()->Set(v8_str("boo"), v8_num(3));
+  Local<v8::FunctionTemplate> t4 = v8::FunctionTemplate::New();
+  t4->InstanceTemplate()->Set(v8_str("baz"), v8_num(4));
+
+  // Force dictionary-based properties.
+  i::ScopedVector<char> name_buf(1024);
+  for (int i = 1; i <= 1000; i++) {
+    i::OS::SNPrintF(name_buf, "sdf%d", i);
+    t2->InstanceTemplate()->Set(v8_str(name_buf.start()), v8_num(2));
+  }
+
+  Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
+  Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
+  Local<v8::Object> o3 = t3->GetFunction()->NewInstance();
+  Local<v8::Object> o4 = t4->GetFunction()->NewInstance();
+
+  // Create prototype chain of hidden prototypes.
+  CHECK(o4->SetPrototype(o3));
+  CHECK(o3->SetPrototype(o2));
+  CHECK(o2->SetPrototype(o1));
+
+  // Call the runtime version of GetLocalPropertyNames() on the natively
+  // created object through JavaScript.
+  context->Global()->Set(v8_str("obj"), o4);
+  CompileRun("var names = %GetLocalPropertyNames(obj);");
+
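+  // 1000 dictionary-mode properties plus foo, fuz1, fuz2, objects, boo
+  // and baz.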
+  ExpectInt32("names.length", 1006);
+  ExpectTrue("names.indexOf(\"baz\") >= 0");
+  ExpectTrue("names.indexOf(\"boo\") >= 0");
+  ExpectTrue("names.indexOf(\"foo\") >= 0");
+  ExpectTrue("names.indexOf(\"fuz1\") >= 0");
+  ExpectTrue("names.indexOf(\"fuz2\") >= 0");
+  ExpectFalse("names[1005] == undefined");
+}
+
+
 THREADED_TEST(FunctionReadOnlyPrototype) {
   v8::HandleScope handle_scope;
   LocalContext context;
@@ -7241,7 +7590,7 @@
   Local<Function> cons = templ->GetFunction();
   context->Global()->Set(v8_str("Fun"), cons);
   Local<v8::Object> inst = cons->NewInstance();
-  i::Handle<i::JSObject> obj = v8::Utils::OpenHandle(*inst);
+  i::Handle<i::JSObject> obj(v8::Utils::OpenHandle(*inst));
   Local<Value> value = CompileRun("(new Fun()).constructor === Fun");
   CHECK(value->BooleanValue());
 }
@@ -7492,9 +7841,11 @@
                            "  var bar = 2;"
                            "  with (x) { return eval('bar'); }"
                            "}"
-                           "f(this)"));
+                           "result4 = f(this)"));
   script->Run();
-  CHECK(try_catch.HasCaught());
+  CHECK(!try_catch.HasCaught());
+  CHECK_EQ(2, current->Global()->Get(v8_str("result4"))->Int32Value());
+
   try_catch.Reset();
 }
 
@@ -7710,7 +8061,7 @@
   }
 
   { Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
-    Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+    Local<ObjectTemplate> instance_template(t->InstanceTemplate());
     Local<v8::Object> instance = t->GetFunction()->NewInstance();
     context->Global()->Set(v8_str("obj2"), instance);
     v8::TryCatch try_catch;
@@ -7815,7 +8166,7 @@
   v8::HandleScope scope;
   if (depth == 0) return CountHandles();
   for (int i = 0; i < iterations; i++) {
-    Local<v8::Number> n = v8::Integer::New(42);
+    Local<v8::Number> n(v8::Integer::New(42));
   }
   return Recurse(depth - 1, iterations);
 }
@@ -7829,7 +8180,7 @@
     v8::HandleScope scope1;
     CHECK_EQ(0, CountHandles());
     for (int i = 0; i < kIterations; i++) {
-      Local<v8::Number> n = v8::Integer::New(42);
+      Local<v8::Number> n(v8::Integer::New(42));
       CHECK_EQ(i + 1, CountHandles());
     }
 
@@ -7837,7 +8188,7 @@
     {
       v8::HandleScope scope2;
       for (int j = 0; j < kIterations; j++) {
-        Local<v8::Number> n = v8::Integer::New(42);
+        Local<v8::Number> n(v8::Integer::New(42));
         CHECK_EQ(j + 1 + kIterations, CountHandles());
       }
     }
@@ -7883,7 +8234,7 @@
     Local<String> name,
     const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   return v8::Handle<Value>();
 }
 
@@ -8340,10 +8691,10 @@
                                  0, 0, 0, v8_str("data"));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "for (var i = 0; i < 1000; i++) {"
     "  o.x = 42;"
-    "}");
+    "}"));
 }
 
 
@@ -8613,7 +8964,7 @@
   int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
   ++(*call_count);
   if ((*call_count) % 20 == 0) {
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   }
   return v8::Handle<Value>();
 }
@@ -8769,11 +9120,11 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "var result = 0;"
       "for (var i = 0; i < 100; i++) {"
       "  result = o.method(41);"
-      "}");
+      "}"));
   CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
   CHECK_EQ(100, interceptor_call_count);
 }
@@ -8796,14 +9147,14 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
       "var result = 0;"
       "for (var i = 0; i < 100; i++) {"
       "  result = receiver.method(41);"
-      "}");
+      "}"));
   CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
   CHECK_EQ(100, interceptor_call_count);
 }
@@ -8826,7 +9177,7 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8838,7 +9189,7 @@
       "    saved_result = result;"
       "    receiver = {method: function(x) { return x - 1 }};"
       "  }"
-      "}");
+      "}"));
   CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
   CHECK_GE(interceptor_call_count, 50);
@@ -8862,7 +9213,7 @@
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8874,7 +9225,7 @@
       "    saved_result = result;"
       "    o.method = function(x) { return x - 1 };"
       "  }"
-      "}");
+      "}"));
   CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
   CHECK_GE(interceptor_call_count, 50);
@@ -8899,7 +9250,7 @@
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
   v8::TryCatch try_catch;
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8911,7 +9262,7 @@
       "    saved_result = result;"
       "    receiver = 333;"
       "  }"
-      "}");
+      "}"));
   CHECK(try_catch.HasCaught());
   CHECK_EQ(v8_str("TypeError: Object 333 has no method 'method'"),
            try_catch.Exception()->ToString());
@@ -8938,7 +9289,7 @@
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
   v8::TryCatch try_catch;
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -8950,7 +9301,7 @@
       "    saved_result = result;"
       "    receiver = {method: receiver.method};"
       "  }"
-      "}");
+      "}"));
   CHECK(try_catch.HasCaught());
   CHECK_EQ(v8_str("TypeError: Illegal invocation"),
            try_catch.Exception()->ToString());
@@ -8967,16 +9318,16 @@
                                 v8::Handle<v8::Signature>());
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "var result = 0;"
       "for (var i = 0; i < 100; i++) {"
       "  result = o.method(41);"
-      "}");
+      "}"));
 
   CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
 }
@@ -8990,19 +9341,19 @@
                                 v8::Signature::New(fun_templ));
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
       "var result = 0;"
       "for (var i = 0; i < 100; i++) {"
       "  result = receiver.method(41);"
-      "}");
+      "}"));
 
   CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
 }
@@ -9016,12 +9367,12 @@
                                 v8::Signature::New(fun_templ));
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -9033,7 +9384,7 @@
       "    saved_result = result;"
       "    receiver = {method: function(x) { return x - 1 }};"
       "  }"
-      "}");
+      "}"));
   CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
 }
@@ -9047,13 +9398,13 @@
                                 v8::Signature::New(fun_templ));
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
-  v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+  v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
   context->Global()->Set(v8_str("o"), fun->NewInstance());
   v8::TryCatch try_catch;
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
       "o.foo = 17;"
       "var receiver = {};"
       "receiver.__proto__ = o;"
@@ -9065,7 +9416,7 @@
       "    saved_result = result;"
       "    receiver = 333;"
       "  }"
-      "}");
+      "}"));
   CHECK(try_catch.HasCaught());
   CHECK_EQ(v8_str("TypeError: Object 333 has no method 'method'"),
            try_catch.Exception()->ToString());
@@ -9093,7 +9444,7 @@
   templ->SetNamedPropertyHandler(NoBlockGetterX);
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "proto = new Object();"
     "proto.y = function(x) { return x + 1; };"
     "proto.z = function(x) { return x - 1; };"
@@ -9103,7 +9454,7 @@
     "for (var i = 0; i < 10; i++) {"
     "  if (i == 5) { method = 'z'; };"
     "  result += o[method](41);"
-    "}");
+    "}"));
   CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
 }
 
@@ -9119,7 +9470,7 @@
   context->Global()->Set(v8_str("proto1"), templ->NewInstance());
   keyed_call_ic_function =
       v8_compile("function f(x) { return x - 1; }; f")->Run();
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "o = new Object();"
     "proto2 = new Object();"
     "o.y = function(x) { return x + 1; };"
@@ -9131,7 +9482,7 @@
     "for (var i = 0; i < 10; i++) {"
     "  if (i == 5) { method = 'y'; };"
     "  result += o[method](41);"
-    "}");
+    "}"));
   CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
 }
 
@@ -9144,7 +9495,7 @@
   templ->SetNamedPropertyHandler(NoBlockGetterX);
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "function inc(x) { return x + 1; };"
     "inc(1);"
     "function dec(x) { return x - 1; };"
@@ -9157,7 +9508,7 @@
     "for (var i = 0; i < 10; i++) {"
     "  if (i == 5) { method = 'y'; };"
     "  result += o[method](41);"
-    "}");
+    "}"));
   CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
 }
 
@@ -9170,7 +9521,7 @@
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
 
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "function len(x) { return x.length; };"
     "o.__proto__ = this;"
     "var m = 'parseFloat';"
@@ -9181,7 +9532,7 @@
     "    saved_result = result;"
     "  };"
     "  result = o[m]('239');"
-    "}");
+    "}"));
   CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
   CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
 }
@@ -9194,7 +9545,7 @@
   LocalContext context;
   context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
 
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "var o = new Object();"
     "o.__proto__ = proto;"
     "o.method = function(x) { return x + 1; };"
@@ -9203,7 +9554,7 @@
     "for (var i = 0; i < 10; i++) {"
     "  if (i == 5) { o.method = function(x) { return x - 1; }; };"
     "  result += o[m](41);"
-    "}");
+    "}"));
   CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
 }
 
@@ -9216,7 +9567,7 @@
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
 
-  v8::Handle<Value> value = CompileRun(
+  v8::Handle<Value> value(CompileRun(
     "var proto = new Object();"
     "o.__proto__ = proto;"
     "proto.method = function(x) { return x + 1; };"
@@ -9225,7 +9576,7 @@
     "for (var i = 0; i < 10; i++) {"
     "  if (i == 5) { proto.method = function(x) { return x - 1; }; };"
     "  result += o[m](41);"
-    "}");
+    "}"));
   CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
 }
 
@@ -9964,6 +10315,7 @@
 
 
 static int GetGlobalObjectsCount() {
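+  // Incremental marking can leave the heap in a non-iterable state; make it
+  // iterable before walking it with HeapIterator.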
+  i::Isolate::Current()->heap()->EnsureHeapIsIterable();
   int count = 0;
   i::HeapIterator it;
   for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
@@ -9978,9 +10330,8 @@
   // the first garbage collection but some of the maps have already
   // been marked at that point.  Therefore some of the maps are not
   // collected until the second garbage collection.
-  HEAP->global_context_map();
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   int count = GetGlobalObjectsCount();
 #ifdef DEBUG
   if (count != expected) HEAP->TracePathToGlobal();
@@ -10049,7 +10400,7 @@
   // weak callback of the first handle would be able to 'reallocate' it.
   handle1.MakeWeak(NULL, NewPersistentHandleCallback);
   handle2.Dispose();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -10057,7 +10408,7 @@
 
 void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
   to_be_disposed.Dispose();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   handle.Dispose();
 }
 
@@ -10073,7 +10424,7 @@
   }
   handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
   to_be_disposed = handle2;
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
@@ -10099,7 +10450,7 @@
   }
   handle2.MakeWeak(NULL, DisposingCallback);
   handle3.MakeWeak(NULL, HandleCreatingCallback);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -10141,7 +10492,7 @@
   v8::Persistent<Context> env = Context::New();
   env->Enter();
   v8::Handle<Value> value = NestedScope(env);
-  v8::Handle<String> str = value->ToString();
+  v8::Handle<String> str(value->ToString());
   env->Exit();
   env.Dispose();
 }
@@ -10149,7 +10500,7 @@
 
 THREADED_TEST(ExternalAllocatedMemory) {
   v8::HandleScope outer;
-  v8::Persistent<Context> env = Context::New();
+  v8::Persistent<Context> env(Context::New());
   const int kSize = 1024*1024;
   CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
   CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
@@ -10487,7 +10838,7 @@
   i::Handle<i::FunctionTemplateInfo> constructor(
       i::FunctionTemplateInfo::cast(internal_template->constructor()));
   CHECK(!constructor->access_check_info()->IsUndefined());
-  v8::Persistent<Context> context0 = Context::New(NULL, global_template);
+  v8::Persistent<Context> context0(Context::New(NULL, global_template));
   CHECK(!constructor->access_check_info()->IsUndefined());
 }
 
@@ -10915,7 +11266,7 @@
       {
         v8::Locker lock;
         // TODO(lrn): Perhaps create some garbage before collecting.
-        HEAP->CollectAllGarbage(false);
+        HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -11037,7 +11388,7 @@
     while (gc_during_apply_ < kRequiredGCs) {
       {
         v8::Locker lock;
-        HEAP->CollectAllGarbage(false);
+        HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -11187,6 +11538,7 @@
 // Test that we can still flatten a string if the components it is built up
 // from have been turned into 16-bit strings in the meantime.
 THREADED_TEST(MorphCompositeStringTest) {
+  char utf_buffer[129];
   const char* c_string = "Now is the time for all good men"
                          " to come to the aid of the party";
   uint16_t* two_byte_string = AsciiToTwoByteString(c_string);
@@ -11215,6 +11567,17 @@
     MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource);
     MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource);
 
+    // This should convert to UTF-8 without flattening, since everything is
+    // ASCII.
+    Handle<String> cons = v8_compile("cons")->Run().As<String>();
+    CHECK_EQ(128, cons->Utf8Length());
+    int nchars = -1;
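+    // A buffer capacity of -1 lets WriteUtf8 write the whole string plus a
+    // terminating NUL: 129 bytes for the 128 ASCII characters.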
+    CHECK_EQ(129, cons->WriteUtf8(utf_buffer, -1, &nchars));
+    CHECK_EQ(128, nchars);
+    CHECK_EQ(0, strcmp(
+        utf_buffer,
+        "Now is the time for all good men to come to the aid of the party"
+        "Now is the time for all good men to come to the aid of the party"));
+
     // Now do some stuff to make sure the strings are flattened, etc.
     CompileRun(
         "/[^a-z]/.test(cons);"
@@ -11753,13 +12116,15 @@
   i::Handle<i::ExternalPixelArray> pixels =
       i::Handle<i::ExternalPixelArray>::cast(
           FACTORY->NewExternalArray(kElementCount,
-                                       v8::kExternalPixelArray,
-                                       pixel_data));
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+                                    v8::kExternalPixelArray,
+                                    pixel_data));
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     pixels->set(i, i % 256);
   }
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(i % 256, pixels->get_scalar(i));
     CHECK_EQ(i % 256, pixel_data[i]);
@@ -12235,11 +12600,13 @@
   i::Handle<ExternalArrayClass> array =
       i::Handle<ExternalArrayClass>::cast(
           FACTORY->NewExternalArray(kElementCount, array_type, array_data));
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     array->set(i, static_cast<ElementType>(i));
   }
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(static_cast<int64_t>(i),
              static_cast<int64_t>(array->get_scalar(i)));
@@ -12357,7 +12724,8 @@
                       "  }"
                       "}"
                       "sum;");
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(28, result->Int32Value());
 
   // Make sure out-of-range loads do not throw.
@@ -12546,11 +12914,11 @@
     const int kLargeElementCount = kXSize * kYSize * 4;
     ElementType* large_array_data =
         static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
-    i::Handle<ExternalArrayClass> large_array =
+    i::Handle<ExternalArrayClass> large_array(
         i::Handle<ExternalArrayClass>::cast(
             FACTORY->NewExternalArray(kLargeElementCount,
-                                         array_type,
-                                         array_data));
+                                      array_type,
+                                      array_data)));
     v8::Handle<v8::Object> large_obj = v8::Object::New();
     // Set the elements to be the external array.
     large_obj->SetIndexedPropertiesToExternalArrayData(large_array_data,
@@ -12949,8 +13317,8 @@
     "}\n"
     "var x;eval('new foo();');";
   v8::Handle<v8::String> overview_src = v8::String::New(overview_source);
-  v8::Handle<Value> overview_result =
-      v8::Script::New(overview_src, origin)->Run();
+  v8::Handle<Value> overview_result(
+      v8::Script::New(overview_src, origin)->Run());
   ASSERT(!overview_result.IsEmpty());
   ASSERT(overview_result->IsObject());
 
@@ -12970,7 +13338,7 @@
   v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
   v8::Handle<v8::Script> detailed_script(
       v8::Script::New(detailed_src, &detailed_origin));
-  v8::Handle<Value> detailed_result = detailed_script->Run();
+  v8::Handle<Value> detailed_result(detailed_script->Run());
   ASSERT(!detailed_result.IsEmpty());
   ASSERT(detailed_result->IsObject());
 }
@@ -13250,7 +13618,13 @@
     } else {
       uint64_t stored_bits = DoubleToBits(stored_number);
       // Check if quiet nan (bits 51..62 all set).
+#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+      // The most significant fraction bit of a quiet NaN is 0 on the MIPS
+      // architecture, which IEEE-754 allows.
+      CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
+#else
       CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+#endif
     }
 
     // Check that Date::New preserves non-NaNs in the date range and
@@ -13263,7 +13637,13 @@
     } else {
       uint64_t stored_bits = DoubleToBits(stored_date);
       // Check if quiet nan (bits 51..62 all set).
+#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+      // The most significant fraction bit of a quiet NaN is 0 on the MIPS
+      // architecture, which IEEE-754 allows.
+      CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
+#else
       CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+#endif
     }
   }
 }
@@ -13272,7 +13652,7 @@
 static v8::Handle<Value> SpaghettiIncident(const v8::Arguments& args) {
   v8::HandleScope scope;
   v8::TryCatch tc;
-  v8::Handle<v8::String> str = args[0]->ToString();
+  v8::Handle<v8::String> str(args[0]->ToString());
   if (tc.HasCaught())
     return tc.ReThrow();
   return v8::Undefined();
@@ -13337,7 +13717,7 @@
     other_context->Enter();
     CompileRun(source_simple);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13359,7 +13739,7 @@
     other_context->Enter();
     CompileRun(source_eval);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13386,7 +13766,7 @@
     other_context->Enter();
     CompileRun(source_exception);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13434,6 +13814,41 @@
 }
 
 
+THREADED_TEST(ScriptColumnNumber) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"),
+      v8::Integer::New(3), v8::Integer::New(2));
+  v8::Handle<v8::String> script = v8::String::New(
+      "function foo() {}\n\n     function bar() {}");
+  v8::Script::Compile(script, &origin)->Run();
+  v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("foo")));
+  v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("bar")));
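+  // The reported column is that of the '(' after the function name: 12 plus
+  // the origin column offset 2 for foo on the first line, and 17 for bar,
+  // where the offset does not apply.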
+  CHECK_EQ(14, foo->GetScriptColumnNumber());
+  CHECK_EQ(17, bar->GetScriptColumnNumber());
+}
+
+
+THREADED_TEST(FunctionGetScriptId) {
+  v8::HandleScope scope;
+  LocalContext env;
+  v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"),
+      v8::Integer::New(3), v8::Integer::New(2));
+  v8::Handle<v8::String> scriptSource = v8::String::New(
+      "function foo() {}\n\n     function bar() {}");
+  v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
+  script->Run();
+  v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("foo")));
+  v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("bar")));
+  CHECK_EQ(script->Id(), foo->GetScriptId());
+  CHECK_EQ(script->Id(), bar->GetScriptId());
+}
+
+
 static v8::Handle<Value> GetterWhichReturns42(Local<String> name,
                                               const AccessorInfo& info) {
   return v8_num(42);
@@ -13604,26 +14019,26 @@
   v8::V8::AddGCEpilogueCallback(EpilogueCallback);
   CHECK_EQ(0, prologue_call_count);
   CHECK_EQ(0, epilogue_call_count);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(1, prologue_call_count);
   CHECK_EQ(1, epilogue_call_count);
   v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
   v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(1, prologue_call_count_second);
   CHECK_EQ(1, epilogue_call_count_second);
   v8::V8::RemoveGCPrologueCallback(PrologueCallback);
   v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(2, prologue_call_count_second);
   CHECK_EQ(2, epilogue_call_count_second);
   v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
   v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(2, prologue_call_count_second);
@@ -13840,7 +14255,7 @@
 void FailedAccessCheckCallbackGC(Local<v8::Object> target,
                                  v8::AccessType type,
                                  Local<v8::Value> data) {
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -14414,7 +14829,7 @@
                  "})()",
                  "ReferenceError: cell is not defined");
     CompileRun("cell = \"new_second\";");
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     ExpectString("readCell()", "new_second");
     ExpectString("readCell()", "new_second");
   }
@@ -14535,7 +14950,7 @@
 
   // RegExps are objects on which you can set properties.
   re->Set(v8_str("property"), v8::Integer::New(32));
-  v8::Handle<v8::Value> value = CompileRun("re.property");
+  v8::Handle<v8::Value> value(CompileRun("re.property"));
   ASSERT_EQ(32, value->Int32Value());
 
   v8::TryCatch try_catch;
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 839b7f5..cdab8f7 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,15 +93,15 @@
   Label L, C;
 
   __ mov(edx, Operand(esp, 4));
-  __ xor_(eax, Operand(eax));  // clear eax
+  __ xor_(eax, eax);  // clear eax
   __ jmp(&C);
 
   __ bind(&L);
-  __ add(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ add(eax, edx);
+  __ sub(edx, Immediate(1));
 
   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
 
@@ -135,11 +135,11 @@
   __ jmp(&C);
 
   __ bind(&L);
-  __ imul(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ imul(eax, edx);
+  __ sub(edx, Immediate(1));
 
   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
 
@@ -275,10 +275,10 @@
   __ subsd(xmm0, xmm1);
   __ divsd(xmm0, xmm1);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
 
   CodeDesc desc;
@@ -314,12 +314,12 @@
   v8::internal::byte buffer[256];
   Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   __ mov(eax, Operand(esp, 4));
-  __ cvtsi2sd(xmm0, Operand(eax));
+  __ cvtsi2sd(xmm0, eax);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
   CodeDesc desc;
   assm.GetCode(&desc);
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 2d9b012..7f63ebc 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -270,8 +270,8 @@
   CHECK(!fun.is_null());
   bool has_pending_exception;
   Handle<JSObject> global(Isolate::Current()->context()->global());
-  Handle<Object> result =
-      Execution::Call(fun, global, 0, NULL, &has_pending_exception);
+  Handle<Object> result(
+      Execution::Call(fun, global, 0, NULL, &has_pending_exception));
   CHECK(has_pending_exception);
   CHECK_EQ(42.0, Isolate::Current()->pending_exception()->
            ToObjectChecked()->Number());
@@ -305,10 +305,11 @@
   Handle<Object> fun1(fun1_object->ToObjectChecked());
   CHECK(fun1->IsJSFunction());
 
-  Object** argv[1] = {
-    Handle<Object>::cast(FACTORY->LookupAsciiSymbol("hello")).location()
-  };
-  Execution::Call(Handle<JSFunction>::cast(fun1), global, 1, argv,
+  Handle<Object> argv[] = { FACTORY->LookupAsciiSymbol("hello") };
+  Execution::Call(Handle<JSFunction>::cast(fun1),
+                  global,
+                  ARRAY_SIZE(argv),
+                  argv,
                   &has_pending_exception);
   CHECK(!has_pending_exception);
 }
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 45da6dc..a9e2836 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -409,11 +409,8 @@
 
 
 static Handle<Code> ComputeCallDebugBreak(int argc) {
-  CALL_HEAP_FUNCTION(
-      v8::internal::Isolate::Current(),
-      v8::internal::Isolate::Current()->stub_cache()->ComputeCallDebugBreak(
-          argc, Code::CALL_IC),
-      Code);
+  return Isolate::Current()->stub_cache()->ComputeCallDebugBreak(argc,
+                                                                 Code::CALL_IC);
 }
 
 
@@ -425,8 +422,8 @@
   CHECK_EQ(NULL, Isolate::Current()->debug()->debug_info_list_);
 
   // Collect garbage to ensure weak handles are cleared.
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
 // Iterate the heap and check that there are no debugger-related objects left.
   HeapIterator iterator;
@@ -859,7 +856,7 @@
 
   if (event == v8::Break) {
     break_point_hit_count++;
-    v8::Handle<v8::Function> fun = v8::Handle<v8::Function>::Cast(data);
+    v8::Handle<v8::Function> fun(v8::Handle<v8::Function>::Cast(data));
     ClearBreakPoint(debug_event_remove_break_point);
   }
 }
@@ -944,7 +941,7 @@
       HEAP->CollectGarbage(v8::internal::NEW_SPACE);
     } else {
       // Mark sweep compact.
-      HEAP->CollectAllGarbage(true);
+      HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     }
   }
 }
@@ -1417,8 +1414,7 @@
 // Call the function three times with different garbage collections in between
 // and make sure that the break point survives.
 static void CallAndGC(v8::Local<v8::Object> recv,
-                      v8::Local<v8::Function> f,
-                      bool force_compaction) {
+                      v8::Local<v8::Function> f) {
   break_point_hit_count = 0;
 
   for (int i = 0; i < 3; i++) {
@@ -1432,14 +1428,15 @@
     CHECK_EQ(2 + i * 3, break_point_hit_count);
 
     // Mark sweep (and perhaps compact) and call function.
-    HEAP->CollectAllGarbage(force_compaction);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     f->Call(recv, 0, NULL);
     CHECK_EQ(3 + i * 3, break_point_hit_count);
   }
 }
 
 
-static void TestBreakPointSurviveGC(bool force_compaction) {
+// Test that break points survive garbage collection.
+TEST(BreakPointSurviveGC) {
   break_point_hit_count = 0;
   v8::HandleScope scope;
   DebugLocalContext env;
@@ -1450,50 +1447,50 @@
 
   // Test IC store break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    v8::Local<v8::Function> bar(
+        CompileFunction(&env, "function foo(){}", "foo"));
     foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test IC load break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    v8::Local<v8::Function> bar(
+        CompileFunction(&env, "function foo(){}", "foo"));
     foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test IC call break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    v8::Local<v8::Function> bar(
+        CompileFunction(&env, "function foo(){}", "foo"));
     foo = CompileFunction(&env,
                           "function bar(){};function foo(){bar();}",
                           "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test return break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    v8::Local<v8::Function> bar(
+        CompileFunction(&env, "function foo(){}", "foo"));
     foo = CompileFunction(&env, "function foo(){}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test non IC break point with garbage collection.
   {
-    v8::Local<v8::Function> bar =
-        CompileFunction(&env, "function foo(){}", "foo");
+    v8::Local<v8::Function> bar(
+        CompileFunction(&env, "function foo(){}", "foo"));
     foo = CompileFunction(&env, "function foo(){var bar=0;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
 
   v8::Debug::SetDebugEventListener(NULL);
@@ -1501,13 +1498,6 @@
 }
 
 
-// Test that a break point can be set at a return store location.
-TEST(BreakPointSurviveGC) {
-  TestBreakPointSurviveGC(false);
-  TestBreakPointSurviveGC(true);
-}
-
-
 // Test that break points can be set using the global Debug object.
 TEST(BreakPointThroughJavaScript) {
   break_point_hit_count = 0;
@@ -2259,7 +2249,7 @@
   }
   f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   SetScriptBreakPointByNameFromJS("test.html", 3, -1);
 
@@ -3761,8 +3751,8 @@
   v8::internal::Isolate::Current()->TraceException(false);
 
   // Create functions for testing break on exception.
-  v8::Local<v8::Function> throws =
-      CompileFunction(&env, "function throws(){throw 1;}", "throws");
+  v8::Local<v8::Function> throws(
+      CompileFunction(&env, "function throws(){throw 1;}", "throws"));
   v8::Local<v8::Function> caught =
       CompileFunction(&env,
                       "function caught(){try {throws();} catch(e) {};}",
@@ -5557,10 +5547,10 @@
     v8::HandleScope scope;
 
     // Get the test functions again.
-    v8::Local<v8::Function> foo =
-      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
-    v8::Local<v8::Function> bar =
-      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+    v8::Local<v8::Function> foo(v8::Local<v8::Function>::Cast(
+        env->Global()->Get(v8::String::New("foo"))));
+    v8::Local<v8::Function> bar(v8::Local<v8::Function>::Cast(
+        env->Global()->Get(v8::String::New("foo"))));
 
     foo->Call(env->Global(), 0, NULL);
     CHECK_EQ(0, break_point_hit_count);
@@ -6037,7 +6027,7 @@
 
   EmptyExternalStringResource source_ext_str;
   v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
-  v8::Handle<v8::Script> evil_script = v8::Script::Compile(source);
+  v8::Handle<v8::Script> evil_script(v8::Script::Compile(source));
   Handle<i::ExternalTwoByteString> i_source(
       i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
   // This situation can happen if source was an external string disposed
@@ -6472,7 +6462,7 @@
 
   // Do garbage collection to ensure that only the script in this test will be
   // collected afterwards.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   script_collected_count = 0;
   v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
@@ -6484,7 +6474,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   CHECK_EQ(2, script_collected_count);
 
@@ -6520,7 +6510,7 @@
 
     // Do garbage collection to ensure that only the script in this test will be
     // collected afterwards.
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
     v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
     {
@@ -6531,7 +6521,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   CHECK_EQ(2, script_collected_message_count);
 
@@ -6685,7 +6675,7 @@
     break_point_hit_count++;
 
     v8::HandleScope scope;
-    v8::Handle<v8::String> json = message.GetJSON();
+    v8::Handle<v8::String> json(message.GetJSON());
 
     SendContinueCommand();
   } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
@@ -6696,7 +6686,7 @@
     isolate->stack_guard()->DebugBreak();
 
     // Force serialization to trigger some internal JS execution.
-    v8::Handle<v8::String> json = message.GetJSON();
+    v8::Handle<v8::String> json(message.GetJSON());
 
     // Restore previous state.
     if (is_debug_break) {
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index 6198391..aa733c7 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -232,7 +232,7 @@
     context.Check("const x; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -240,7 +240,7 @@
     context.Check("const x = 0; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());  // SB 0 - BUG 1213579
   }
 }
@@ -285,18 +285,18 @@
 
   { PresentPropertyContext context;
     context.Check("const x; x",
-                  0,
-                  0,
+                  1,  // access
+                  1,  // initialization
                   1,  // (re-)declaration
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  EXPECT_RESULT, Undefined());
   }
 
   { PresentPropertyContext context;
     context.Check("const x = 0; x",
-                  0,
-                  0,
+                  1,  // access
+                  1,  // initialization
                   1,  // (re-)declaration
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  EXPECT_RESULT, Number::New(0));
   }
 }
 
@@ -341,7 +341,7 @@
     context.Check("const x; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initializetion
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -349,7 +349,7 @@
     context.Check("const x = 0; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());  // SB 0 - BUG 1213579
   }
 
@@ -429,18 +429,20 @@
 
   { AppearingPropertyContext context;
     context.Check("const x; x",
-                  0,
-                  1,  // declaration
+                  1,  // access
                   2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  1,  // declaration
+                  EXPECT_RESULT, Undefined());
   }
 
   { AppearingPropertyContext context;
     context.Check("const x = 0; x",
-                  0,
-                  1,  // declaration
+                  1,  // access
                   2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  //  x has already been declared!
+                  1,  // declaration
+                  EXPECT_RESULT, Undefined());
+                  // The result is undefined because the declaration
+                  // succeeded but the initialization to 0 failed, since
+                  // this context makes the property appear in the meantime.
   }
 }
 
@@ -496,9 +498,9 @@
   { ReappearingPropertyContext context;
     context.Check("const x; var x = 0",
                   0,
-                  2,  // var declaration + const initialization
-                  4,  // 2 x declaration + 2 x initialization
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  3,  // const declaration+initialization, var initialization
+                  3,  // 2 x declaration + var initialization
+                  EXPECT_RESULT, Undefined());
   }
 }
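
The rewritten expectations above encode a semantic change: re-declaring a
const over a property that already exists on the receiver no longer throws
"x has already been declared" but completes with a result. A sketch of one
updated check, assuming (as the inline comments suggest) that the three
integers count the interceptor's get, set and query hits:

    { PresentPropertyContext context;
      context.Check("const x = 0; x",
                    1,  // access (get)
                    1,  // initialization (set)
                    1,  // (re-)declaration (query)
                    EXPECT_RESULT, Number::New(0));
    }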
 
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
index 056c981..c713b02 100644
--- a/test/cctest/test-deoptimization.cc
+++ b/test/cctest/test-deoptimization.cc
@@ -237,7 +237,7 @@
 
   v8::Local<v8::Function> fun =
       v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
-  Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
+  Handle<v8::internal::JSFunction> f(v8::Utils::OpenHandle(*fun));
 }
 
 
diff --git a/test/cctest/test-dictionary.cc b/test/cctest/test-dictionary.cc
index 15a854b..793e228 100644
--- a/test/cctest/test-dictionary.cc
+++ b/test/cctest/test-dictionary.cc
@@ -38,6 +38,7 @@
 
 using namespace v8::internal;
 
+
 TEST(ObjectHashTable) {
   v8::HandleScope scope;
   LocalContext context;
@@ -66,7 +67,8 @@
   CHECK_EQ(table->NumberOfDeletedElements(), 1);
   CHECK_EQ(table->Lookup(*a), HEAP->undefined_value());
 
-  // Keys should map back to their respective values.
+  // Keys should map back to their respective values, and each key should
+  // have had an identity hash code generated for it.
   for (int i = 0; i < 100; i++) {
     Handle<JSObject> key = FACTORY->NewJSArray(7);
     Handle<JSObject> value = FACTORY->NewJSArray(11);
@@ -74,12 +76,67 @@
     CHECK_EQ(table->NumberOfElements(), i + 1);
     CHECK_NE(table->FindEntry(*key), ObjectHashTable::kNotFound);
     CHECK_EQ(table->Lookup(*key), *value);
+    CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
   }
 
-  // Keys never added to the map should not be found.
-  for (int i = 0; i < 1000; i++) {
-    Handle<JSObject> o = FACTORY->NewJSArray(100);
-    CHECK_EQ(table->FindEntry(*o), ObjectHashTable::kNotFound);
-    CHECK_EQ(table->Lookup(*o), HEAP->undefined_value());
+  // Keys that were never added to the map, but that already have an
+  // identity hash code, should not be found.
+  for (int i = 0; i < 100; i++) {
+    Handle<JSObject> key = FACTORY->NewJSArray(7);
+    CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
+    CHECK_EQ(table->FindEntry(*key), ObjectHashTable::kNotFound);
+    CHECK_EQ(table->Lookup(*key), HEAP->undefined_value());
+    CHECK(key->GetIdentityHash(OMIT_CREATION)->ToObjectChecked()->IsSmi());
+  }
+
+  // Keys that have no identity hash should not be found, and the lookup
+  // itself should not cause an identity hash code to be generated.
+  for (int i = 0; i < 100; i++) {
+    Handle<JSObject> key = FACTORY->NewJSArray(7);
+    CHECK_EQ(table->Lookup(*key), HEAP->undefined_value());
+    CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());
   }
 }
+
+
+#ifdef DEBUG
+TEST(ObjectHashSetCausesGC) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Handle<ObjectHashSet> table = FACTORY->NewObjectHashSet(1);
+  Handle<JSObject> key = FACTORY->NewJSArray(0);
+
+  // Simulate a full heap so that generating an identity hash code
+  // in subsequent calls will request GC.
+  FLAG_gc_interval = 0;
+
+  // Calling Contains() should not cause GC ever.
+  CHECK(!table->Contains(*key));
+
+  // Calling Remove() should not cause GC ever.
+  CHECK(!table->Remove(*key)->IsFailure());
+
+  // Calling Add() should request GC by returning a failure.
+  CHECK(table->Add(*key)->IsRetryAfterGC());
+}
+#endif
+
+
+#ifdef DEBUG
+TEST(ObjectHashTableCausesGC) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Handle<ObjectHashTable> table = FACTORY->NewObjectHashTable(1);
+  Handle<JSObject> key = FACTORY->NewJSArray(0);
+
+  // Simulate a full heap so that generating an identity hash code
+  // in subsequent calls will request GC.
+  FLAG_gc_interval = 0;
+
+  // Calling Lookup() should not cause GC ever.
+  CHECK(table->Lookup(*key)->IsUndefined());
+
+  // Calling Put() should request GC by returning a failure.
+  CHECK(table->Put(*key, *key)->IsRetryAfterGC());
+}
+#endif
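
The new dictionary tests pin down when a JSObject acquires an identity
hash code: a lookup alone must never create one, while inserting the key,
or requesting the hash with ALLOW_CREATION, must. A condensed sketch built
only from calls that appear in the tests above:

    Handle<JSObject> key = FACTORY->NewJSArray(7);
    // A pure query leaves the object without a hash code ...
    CHECK_EQ(key->GetIdentityHash(OMIT_CREATION), HEAP->undefined_value());
    // ... but an explicit request materializes a Smi-valued hash code.
    CHECK(key->GetIdentityHash(ALLOW_CREATION)->ToObjectChecked()->IsSmi());
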
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 9f7d0bb..1e38e4e 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -63,9 +63,9 @@
 
   // Short immediate instructions
   __ adc(eax, 12345678);
-  __ add(Operand(eax), Immediate(12345678));
+  __ add(eax, Immediate(12345678));
   __ or_(eax, 12345678);
-  __ sub(Operand(eax), Immediate(12345678));
+  __ sub(eax, Immediate(12345678));
   __ xor_(eax, 12345678);
   __ and_(eax, 12345678);
   Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
@@ -75,7 +75,7 @@
   __ mov(ebx,  Operand(esp, ecx, times_2, 0));  // [esp+ecx*4]
 
   // ---- All instructions that I can think of
-  __ add(edx, Operand(ebx));
+  __ add(edx, ebx);
   __ add(edx, Operand(12, RelocInfo::NONE));
   __ add(edx, Operand(ebx, 0));
   __ add(edx, Operand(ebx, 16));
@@ -89,7 +89,7 @@
   __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
 
   __ nop();
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ nop();
   __ adc(ecx, 12);
   __ adc(ecx, 1000);
@@ -116,16 +116,16 @@
     CpuFeatures::Scope fscope(RDTSC);
     __ rdtsc();
   }
-  __ movsx_b(edx, Operand(ecx));
-  __ movsx_w(edx, Operand(ecx));
-  __ movzx_b(edx, Operand(ecx));
-  __ movzx_w(edx, Operand(ecx));
+  __ movsx_b(edx, ecx);
+  __ movsx_w(edx, ecx);
+  __ movzx_b(edx, ecx);
+  __ movzx_w(edx, ecx);
 
   __ nop();
-  __ imul(edx, Operand(ecx));
-  __ shld(edx, Operand(ecx));
-  __ shrd(edx, Operand(ecx));
-  __ bts(Operand(edx), ecx);
+  __ imul(edx, ecx);
+  __ shld(edx, ecx);
+  __ shrd(edx, ecx);
+  __ bts(edx, ecx);
   __ bts(Operand(ebx, ecx, times_4, 0), ecx);
   __ nop();
   __ pushad();
@@ -146,9 +146,9 @@
   __ nop();
 
   __ add(edx, Operand(esp, 16));
-  __ add(edx, Operand(ecx));
-  __ mov_b(edx, Operand(ecx));
-  __ mov_b(Operand(ecx), 6);
+  __ add(edx, ecx);
+  __ mov_b(edx, ecx);
+  __ mov_b(ecx, 6);
   __ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
   __ mov_b(Operand(esp, 16), edx);
   __ mov_w(edx, Operand(esp, 16));
@@ -216,22 +216,20 @@
 
   __ adc(edx, 12345);
 
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
 
   __ and_(ebx, 12345);
 
   __ cmp(ebx, 12345);
-  __ cmp(Operand(ebx), Immediate(12));
+  __ cmp(ebx, Immediate(12));
   __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
+  __ cmpb(eax, 100);
 
   __ or_(ebx, 12345);
 
-  __ sub(Operand(ebx), Immediate(12));
+  __ sub(ebx, Immediate(12));
   __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
-  __ subb(Operand(edx, ecx, times_4, 10000), 100);
-  __ subb(Operand(eax), 100);
-  __ subb(eax, Operand(edx, ecx, times_4, 10000));
 
   __ xor_(ebx, 12345);
 
@@ -244,7 +242,7 @@
   __ stos();
 
   __ sub(edx, Operand(ebx, ecx, times_4, 10000));
-  __ sub(edx, Operand(ebx));
+  __ sub(edx, ebx);
 
   __ test(edx, Immediate(12345));
   __ test(edx, Operand(ebx, ecx, times_8, 10000));
@@ -446,8 +444,8 @@
   {
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
-      __ pextrd(Operand(eax), xmm0, 1);
-      __ pinsrd(xmm1, Operand(eax), 0);
+      __ pextrd(eax, xmm0, 1);
+      __ pinsrd(xmm1, eax, 0);
     }
   }
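
Most of the churn in this disassembler test tracks an ia32 assembler API
cleanup: instructions whose operand is a plain register now take the
register directly, and the Operand() wrapper stays reserved for memory
operands. Side by side, as a sketch:

    // Old style: the register had to be wrapped.
    //   __ add(Operand(eax), Immediate(12345678));
    // New style: the register overload is used directly.
    __ add(eax, Immediate(12345678));
    // Memory operands still take an explicit Operand.
    __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));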
 
diff --git a/test/cctest/test-hashing.cc b/test/cctest/test-hashing.cc
index a626510..9c342a9 100644
--- a/test/cctest/test-hashing.cc
+++ b/test/cctest/test-hashing.cc
@@ -46,108 +46,66 @@
 
 static v8::Persistent<v8::Context> env;
 
-#define __ masm->
+#define __ assm->
 
 
-void generate(MacroAssembler* masm, i::Vector<const char> string) {
-  // GenerateHashInit takes the first character as an argument so it can't
-  // handle the zero length string.
-  ASSERT(string.length() > 0);
+void generate(MacroAssembler* assm, i::Vector<const char> string) {
 #ifdef V8_TARGET_ARCH_IA32
   __ push(ebx);
   __ push(ecx);
   __ mov(eax, Immediate(0));
-  __ mov(ebx, Immediate(string.at(0)));
-  StringHelper::GenerateHashInit(masm, eax, ebx, ecx);
+  if (string.length() > 0) {
+    __ mov(ebx, Immediate(string.at(0)));
+    StringHelper::GenerateHashInit(assm, eax, ebx, ecx);
+  }
   for (int i = 1; i < string.length(); i++) {
     __ mov(ebx, Immediate(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, eax, ebx, ecx);
+    StringHelper::GenerateHashAddCharacter(assm, eax, ebx, ecx);
   }
-  StringHelper::GenerateHashGetHash(masm, eax, ecx);
+  StringHelper::GenerateHashGetHash(assm, eax, ecx);
   __ pop(ecx);
   __ pop(ebx);
   __ Ret();
 #elif V8_TARGET_ARCH_X64
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
   __ push(rbx);
   __ push(rcx);
   __ movq(rax, Immediate(0));
-  __ movq(rbx, Immediate(string.at(0)));
-  StringHelper::GenerateHashInit(masm, rax, rbx, rcx);
+  if (string.length() > 0) {
+    __ movq(rbx, Immediate(string.at(0)));
+    StringHelper::GenerateHashInit(assm, rax, rbx, rcx);
+  }
   for (int i = 1; i < string.length(); i++) {
     __ movq(rbx, Immediate(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, rax, rbx, rcx);
+    StringHelper::GenerateHashAddCharacter(assm, rax, rbx, rcx);
   }
-  StringHelper::GenerateHashGetHash(masm, rax, rcx);
+  StringHelper::GenerateHashGetHash(assm, rax, rcx);
   __ pop(rcx);
   __ pop(rbx);
-  __ pop(kRootRegister);
   __ Ret();
 #elif V8_TARGET_ARCH_ARM
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-
   __ mov(r0, Operand(0));
-  __ mov(ip, Operand(string.at(0)));
-  StringHelper::GenerateHashInit(masm, r0, ip);
+  if (string.length() > 0) {
+    __ mov(ip, Operand(string.at(0)));
+    StringHelper::GenerateHashInit(assm, r0, ip);
+  }
   for (int i = 1; i < string.length(); i++) {
     __ mov(ip, Operand(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, r0, ip);
+    StringHelper::GenerateHashAddCharacter(assm, r0, ip);
   }
-  StringHelper::GenerateHashGetHash(masm, r0);
-  __ pop(kRootRegister);
+  StringHelper::GenerateHashGetHash(assm, r0);
   __ mov(pc, Operand(lr));
 #elif V8_TARGET_ARCH_MIPS
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-
   __ li(v0, Operand(0));
-  __ li(t1, Operand(string.at(0)));
-  StringHelper::GenerateHashInit(masm, v0, t1);
+  if (string.length() > 0) {
+    __ li(t1, Operand(string.at(0)));
+    StringHelper::GenerateHashInit(assm, v0, t1);
+  }
   for (int i = 1; i < string.length(); i++) {
     __ li(t1, Operand(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, v0, t1);
+    StringHelper::GenerateHashAddCharacter(assm, v0, t1);
   }
-  StringHelper::GenerateHashGetHash(masm, v0);
-  __ pop(kRootRegister);
+  StringHelper::GenerateHashGetHash(assm, v0);
   __ jr(ra);
-  __ nop();
-#endif
-}
-
-
-void generate(MacroAssembler* masm, uint32_t key) {
-#ifdef V8_TARGET_ARCH_IA32
-  __ push(ebx);
-  __ mov(eax, Immediate(key));
-  __ GetNumberHash(eax, ebx);
-  __ pop(ebx);
-  __ Ret();
-#elif V8_TARGET_ARCH_X64
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-  __ push(rbx);
-  __ movq(rax, Immediate(key));
-  __ GetNumberHash(rax, rbx);
-  __ pop(rbx);
-  __ pop(kRootRegister);
-  __ Ret();
-#elif V8_TARGET_ARCH_ARM
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-  __ mov(r0, Operand(key));
-  __ GetNumberHash(r0, ip);
-  __ pop(kRootRegister);
-  __ mov(pc, Operand(lr));
-#elif V8_TARGET_ARCH_MIPS
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-  __ li(v0, Operand(key));
-  __ GetNumberHash(v0, t1);
-  __ pop(kRootRegister);
-  __ jr(ra);
-  __ nop();
 #endif
 }
 
@@ -155,19 +113,19 @@
 void check(i::Vector<const char> string) {
   v8::HandleScope scope;
   v8::internal::byte buffer[2048];
-  MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
+  MacroAssembler assm(Isolate::Current(), buffer, sizeof buffer);
 
-  generate(&masm, string);
+  generate(&assm, string);
 
   CodeDesc desc;
-  masm.GetCode(&desc);
+  assm.GetCode(&desc);
   Code* code = Code::cast(HEAP->CreateCode(
       desc,
       Code::ComputeFlags(Code::STUB),
       Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
   CHECK(code->IsCode());
 
-  HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
+  HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(Code::cast(code)->entry());
   Handle<String> v8_string = FACTORY->NewStringFromAscii(string);
   v8_string->set_hash_field(String::kEmptyHashField);
 #ifdef USE_SIMULATOR
@@ -181,47 +139,12 @@
 }
 
 
-void check(uint32_t key) {
-  v8::HandleScope scope;
-  v8::internal::byte buffer[2048];
-  MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
-
-  generate(&masm, key);
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  Code* code = Code::cast(HEAP->CreateCode(
-      desc,
-      Code::ComputeFlags(Code::STUB),
-      Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
-  CHECK(code->IsCode());
-
-  HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
-#ifdef USE_SIMULATOR
-  uint32_t codegen_hash =
-      reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
-#else
-  uint32_t codegen_hash = hash();
-#endif
-
-  uint32_t runtime_hash = ComputeIntegerHash(
-      key,
-      Isolate::Current()->heap()->HashSeed());
-  CHECK(runtime_hash == codegen_hash);
-}
-
-
 void check_twochars(char a, char b) {
   char ab[2] = {a, b};
   check(i::Vector<const char>(ab, 2));
 }
 
 
-static uint32_t PseudoRandom(uint32_t i, uint32_t j) {
-  return ~(~((i * 781) ^ (j * 329)));
-}
-
-
 TEST(StringHash) {
   if (env.IsEmpty()) env = v8::Context::New();
   for (int a = 0; a < String::kMaxAsciiCharCode; a++) {
@@ -232,6 +155,7 @@
       check_twochars(static_cast<char>(a), static_cast<char>(b));
     }
   }
+  check(i::Vector<const char>("",        0));
   check(i::Vector<const char>("*",       1));
   check(i::Vector<const char>(".zZ",     3));
   check(i::Vector<const char>("muc",     3));
@@ -239,22 +163,4 @@
   check(i::Vector<const char>("-=[ vee eight ftw ]=-", 21));
 }
 
-
-TEST(NumberHash) {
-  if (env.IsEmpty()) env = v8::Context::New();
-
-  // Some specific numbers
-  for (uint32_t key = 0; key < 42; key += 7) {
-    check(key);
-  }
-
-  // Some pseudo-random numbers
-  static const uint32_t kLimit = 1000;
-  for (uint32_t i = 0; i < 5; i++) {
-    for (uint32_t j = 0; j < 5; j++) {
-      check(PseudoRandom(i, j) % kLimit);
-    }
-  }
-}
-
 #undef __
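
The rewritten generate() helpers guard GenerateHashInit behind
string.length() > 0, so the generated code no longer assumes a first
character exists and can hash the empty string; the hash state simply
stays at its initial zero. The StringHash test gains the matching case:

    check(i::Vector<const char>("", 0));   // zero-length string, now covered
    check(i::Vector<const char>("*", 1));  // single-character case, unchanged
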
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 6c2afd4..81b68a7 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -252,6 +252,28 @@
   CHECK_EQ(v8::HeapGraphNode::kHeapNumber, b->GetType());
 }
 
+TEST(HeapSnapshotSlicedString) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun(
+      "parent_string = \"123456789.123456789.123456789.123456789.123456789."
+      "123456789.123456789.123456789.123456789.123456789."
+      "123456789.123456789.123456789.123456789.123456789."
+      "123456789.123456789.123456789.123456789.123456789.\";"
+      "child_string = parent_string.slice(100);");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("strings"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* parent_string =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "parent_string");
+  CHECK_NE(NULL, parent_string);
+  const v8::HeapGraphNode* child_string =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "child_string");
+  CHECK_NE(NULL, child_string);
+  const v8::HeapGraphNode* parent =
+      GetProperty(child_string, v8::HeapGraphEdge::kInternal, "parent");
+  CHECK_EQ(parent_string, parent);
+}
 
 TEST(HeapSnapshotInternalReferences) {
   v8::HandleScope scope;
@@ -294,7 +316,7 @@
   const v8::HeapSnapshot* snapshot1 =
       v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
 
-  HEAP->CollectAllGarbage(true);  // Enforce compaction.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   const v8::HeapSnapshot* snapshot2 =
       v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
@@ -873,6 +895,20 @@
 }
 
 
+TEST(NoHandleLeaks) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("document = { URL:\"abcdefgh\" };");
+
+  v8::Handle<v8::String> name(v8_str("leakz"));
+  int count_before = i::HandleScope::NumberOfHandles();
+  v8::HeapProfiler::TakeSnapshot(name);
+  int count_after = i::HandleScope::NumberOfHandles();
+  CHECK_EQ(count_before, count_after);
+}
+
+
 TEST(NodesIteration) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1001,3 +1037,31 @@
   CHECK_EQ(0, StringCmp(
       "Object", i::V8HeapExplorer::GetConstructorName(*js_obj6)));
 }
+
+
+TEST(FastCaseGetter) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("var obj1 = {};\n"
+             "obj1.__defineGetter__('propWithGetter', function Y() {\n"
+             "  return 42;\n"
+             "});\n"
+             "obj1.__defineSetter__('propWithSetter', function Z(value) {\n"
+             "  return this.value_ = value;\n"
+             "});\n");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("fastCaseGetter"));
+
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  const v8::HeapGraphNode* obj1 =
+      GetProperty(global, v8::HeapGraphEdge::kShortcut, "obj1");
+  CHECK_NE(NULL, obj1);
+  const v8::HeapGraphNode* getterFunction =
+      GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get-propWithGetter");
+  CHECK_NE(NULL, getterFunction);
+  const v8::HeapGraphNode* setterFunction =
+      GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set-propWithSetter");
+  CHECK_NE(NULL, setterFunction);
+}
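
TEST(NoHandleLeaks) above is a compact pattern for asserting that an API
call does not leak handles into the current HandleScope: count the live
handles before and after, and require equality. As a reusable sketch,
using only the helpers already present in this file:

    int count_before = i::HandleScope::NumberOfHandles();
    v8::HeapProfiler::TakeSnapshot(v8_str("leakz"));  // call under test
    CHECK_EQ(count_before, i::HandleScope::NumberOfHandles());
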
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 11b8813..2ea359d 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include <stdlib.h>
 
@@ -667,22 +667,23 @@
   Handle<JSObject> object = FACTORY->NewJSObject(function);
   Handle<JSArray> array = Handle<JSArray>::cast(object);
   // We just initialized the VM, no heap allocation failure yet.
-  Object* ok = array->Initialize(0)->ToObjectChecked();
+  array->Initialize(0)->ToObjectChecked();
 
   // Set array length to 0.
-  ok = array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
+  array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
   CHECK_EQ(Smi::FromInt(0), array->length());
-  CHECK(array->HasFastElements());  // Must be in fast mode.
+  // Must be in fast mode.
+  CHECK(array->HasFastTypeElements());
 
   // array[length] = name.
-  ok = array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
+  array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
   CHECK_EQ(Smi::FromInt(1), array->length());
   CHECK_EQ(array->GetElement(0), *name);
 
   // Set array length with larger than smi value.
   Handle<Object> length =
       FACTORY->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
-  ok = array->SetElementsLength(*length)->ToObjectChecked();
+  array->SetElementsLength(*length)->ToObjectChecked();
 
   uint32_t int_length = 0;
   CHECK(length->ToArrayIndex(&int_length));
@@ -690,8 +691,7 @@
   CHECK(array->HasDictionaryElements());  // Must be in slow mode.
 
   // array[length] = name.
-  ok = array->SetElement(
-      int_length, *name, kNonStrictMode, true)->ToObjectChecked();
+  array->SetElement(int_length, *name, kNonStrictMode, true)->ToObjectChecked();
   uint32_t new_int_length = 0;
   CHECK(array->length()->ToArrayIndex(&new_int_length));
   CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -718,10 +718,8 @@
   obj->SetProperty(
       *second, Smi::FromInt(2), NONE, kNonStrictMode)->ToObjectChecked();
 
-  Object* ok =
-      obj->SetElement(0, *first, kNonStrictMode, true)->ToObjectChecked();
-
-  ok = obj->SetElement(1, *second, kNonStrictMode, true)->ToObjectChecked();
+  obj->SetElement(0, *first, kNonStrictMode, true)->ToObjectChecked();
+  obj->SetElement(1, *second, kNonStrictMode, true)->ToObjectChecked();
 
   // Make the clone.
   Handle<JSObject> clone = Copy(obj);
@@ -739,8 +737,8 @@
   clone->SetProperty(
       *second, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
 
-  ok = clone->SetElement(0, *second, kNonStrictMode, true)->ToObjectChecked();
-  ok = clone->SetElement(1, *first, kNonStrictMode, true)->ToObjectChecked();
+  clone->SetElement(0, *second, kNonStrictMode, true)->ToObjectChecked();
+  clone->SetElement(1, *first, kNonStrictMode, true)->ToObjectChecked();
 
   CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
   CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
@@ -822,7 +820,7 @@
       FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
 
   // Allocate a large string (for large object space).
-  int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
+  int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
   char* str = new char[large_size];
   for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
   str[large_size - 1] = '\0';
@@ -838,49 +836,6 @@
 }
 
 
-TEST(LargeObjectSpaceContains) {
-  InitializeVM();
-
-  HEAP->CollectGarbage(NEW_SPACE);
-
-  Address current_top = HEAP->new_space()->top();
-  Page* page = Page::FromAddress(current_top);
-  Address current_page = page->address();
-  Address next_page = current_page + Page::kPageSize;
-  int bytes_to_page = static_cast<int>(next_page - current_top);
-  if (bytes_to_page <= FixedArray::kHeaderSize) {
-    // Alas, need to cross another page to be able to
-    // put desired value.
-    next_page += Page::kPageSize;
-    bytes_to_page = static_cast<int>(next_page - current_top);
-  }
-  CHECK(bytes_to_page > FixedArray::kHeaderSize);
-
-  intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
-  Address flags_addr = reinterpret_cast<Address>(flags_ptr);
-
-  int bytes_to_allocate =
-      static_cast<int>(flags_addr - current_top) + kPointerSize;
-
-  int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
-      kPointerSize;
-  CHECK_EQ(bytes_to_allocate, FixedArray::SizeFor(n_elements));
-  FixedArray* array = FixedArray::cast(
-      HEAP->AllocateFixedArray(n_elements)->ToObjectChecked());
-
-  int index = n_elements - 1;
-  CHECK_EQ(flags_ptr,
-           HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
-  array->set(index, Smi::FromInt(0));
-  // This chould have turned next page into LargeObjectPage:
-  // CHECK(Page::FromAddress(next_page)->IsLargeObjectPage());
-
-  HeapObject* addr = HeapObject::FromAddress(next_page + 2 * kPointerSize);
-  CHECK(HEAP->new_space()->Contains(addr));
-  CHECK(!HEAP->lo_space()->Contains(addr));
-}
-
-
 TEST(EmptyHandleEscapeFrom) {
   InitializeVM();
 
@@ -907,8 +862,7 @@
   InitializeVM();
 
   // Increase the chance of 'bump-the-pointer' allocation in old space.
-  bool force_compaction = true;
-  HEAP->CollectAllGarbage(force_compaction);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   v8::HandleScope scope;
 
@@ -975,12 +929,6 @@
     return;
   }
   CHECK(HEAP->old_pointer_space()->Contains(clone->address()));
-
-  // Step 5: verify validity of region dirty marks.
-  Address clone_addr = clone->address();
-  Page* page = Page::FromAddress(clone_addr);
-  // Check that region covering inobject property 1 is marked dirty.
-  CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
 }
 
 
@@ -1010,17 +958,18 @@
   Handle<JSFunction> function(JSFunction::cast(func_value));
   CHECK(function->shared()->is_compiled());
 
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  // TODO(1609): Currently the incremental marker does not support code
+  // flushing.
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   CHECK(function->shared()->is_compiled());
 
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   // foo should no longer be in the compilation cache
   CHECK(!function->shared()->is_compiled() || function->IsOptimized());
@@ -1109,7 +1058,7 @@
     }
 
     // Mark compact handles the weak references.
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
 
     // Get rid of f3 and f5 in the same way.
@@ -1118,21 +1067,21 @@
       HEAP->PerformScavenge();
       CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
     }
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
     CompileRun("f5=null");
     for (int j = 0; j < 10; j++) {
       HEAP->PerformScavenge();
       CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
     }
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
 
     ctx[i]->Exit();
   }
 
   // Force compilation cache cleanup.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Dispose the global contexts one by one.
   for (int i = 0; i < kNumTestContexts; i++) {
@@ -1146,7 +1095,7 @@
     }
 
     // Mark compact handles the weak references.
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
   }
 
@@ -1161,7 +1110,7 @@
   Handle<Object> object(HEAP->global_contexts_list());
   while (!object->IsUndefined()) {
     count++;
-    if (count == n) HEAP->CollectAllGarbage(true);
+    if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     object =
         Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
   }
@@ -1180,7 +1129,7 @@
   while (object->IsJSFunction() &&
          !Handle<JSFunction>::cast(object)->IsBuiltin()) {
     count++;
-    if (count == n) HEAP->CollectAllGarbage(true);
+    if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     object = Handle<Object>(
         Object::cast(JSFunction::cast(*object)->next_function_link()));
   }
@@ -1240,90 +1189,319 @@
 
 TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
   InitializeVM();
+  HEAP->EnsureHeapIsIterable();
   intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   intptr_t size_of_objects_2 = 0;
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 1% of the larger result.
+  // Delta must be within 5% of the larger result.
+  // TODO(gc): Tighten this up by distinguishing between byte
+  // arrays that are real and those that merely mark free space
+  // on the heap.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 100, delta);
+    CHECK_GT(size_of_objects_1 / 20, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_2 / 100, delta);
+    CHECK_GT(size_of_objects_2 / 20, delta);
   }
 }
 
 
-class HeapIteratorTestHelper {
- public:
-  HeapIteratorTestHelper(Object* a, Object* b)
-      : a_(a), b_(b), a_found_(false), b_found_(false) {}
-  bool a_found() { return a_found_; }
-  bool b_found() { return b_found_; }
-  void IterateHeap(HeapIterator::HeapObjectsFiltering mode) {
-    HeapIterator iterator(mode);
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
-         obj = iterator.next()) {
-      if (obj == a_)
-        a_found_ = true;
-      else if (obj == b_)
-        b_found_ = true;
-    }
+static void FillUpNewSpace(NewSpace* new_space) {
+  // Fill up new space to the point that it is completely full. Make sure
+  // that the scavenger does not undo the filling.
+  v8::HandleScope scope;
+  AlwaysAllocateScope always_allocate;
+  intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
+  intptr_t number_of_fillers = (available / FixedArray::SizeFor(1000)) - 10;
+  for (intptr_t i = 0; i < number_of_fillers; i++) {
+    CHECK(HEAP->InNewSpace(*FACTORY->NewFixedArray(1000, NOT_TENURED)));
   }
- private:
-  Object* a_;
-  Object* b_;
-  bool a_found_;
-  bool b_found_;
-};
+}
 
-TEST(HeapIteratorFilterUnreachable) {
+
+TEST(GrowAndShrinkNewSpace) {
+  InitializeVM();
+  NewSpace* new_space = HEAP->new_space();
+
+  // Explicitly growing should double the space capacity.
+  intptr_t old_capacity, new_capacity;
+  old_capacity = new_space->Capacity();
+  new_space->Grow();
+  new_capacity = new_space->Capacity();
+  CHECK(2 * old_capacity == new_capacity);
+
+  old_capacity = new_space->Capacity();
+  FillUpNewSpace(new_space);
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+
+  // Explicitly shrinking should not affect the capacity while the space
+  // is still full from the previous step.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+
+  // Let the scavenger empty the new space.
+  HEAP->CollectGarbage(NEW_SPACE);
+  CHECK_LE(new_space->Size(), old_capacity);
+
+  // Explicitly shrinking should halve the space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == 2 * new_capacity);
+
+  // Consecutive shrinking should not affect space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_space->Shrink();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+}
+
+
+TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
   InitializeVM();
   v8::HandleScope scope;
-  CompileRun("a = {}; b = {};");
-  v8::Handle<Object> a(ISOLATE->context()->global()->GetProperty(
-      *FACTORY->LookupAsciiSymbol("a"))->ToObjectChecked());
-  v8::Handle<Object> b(ISOLATE->context()->global()->GetProperty(
-      *FACTORY->LookupAsciiSymbol("b"))->ToObjectChecked());
-  CHECK_NE(*a, *b);
-  {
-    HeapIteratorTestHelper helper(*a, *b);
-    helper.IterateHeap(HeapIterator::kFilterUnreachable);
-    CHECK(helper.a_found());
-    CHECK(helper.b_found());
+  NewSpace* new_space = HEAP->new_space();
+  intptr_t old_capacity, new_capacity;
+  old_capacity = new_space->Capacity();
+  new_space->Grow();
+  new_capacity = new_space->Capacity();
+  CHECK(2 * old_capacity == new_capacity);
+  FillUpNewSpace(new_space);
+  HEAP->CollectAllAvailableGarbage();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+}
+
+
+static int NumberOfGlobalObjects() {
+  int count = 0;
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (obj->IsGlobalObject()) count++;
   }
-  CHECK(ISOLATE->context()->global()->DeleteProperty(
-      *FACTORY->LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
-  // We ensure that GC will not happen, so our raw pointer stays valid.
-  AssertNoAllocation no_alloc;
-  Object* a_saved = *a;
-  a.Clear();
-  // Verify that "a" object still resides in the heap...
+  return count;
+}
+
+
+// Test that we don't embed maps from foreign contexts into
+// optimized code.
+TEST(LeakGlobalContextViaMap) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
   {
-    HeapIteratorTestHelper helper(a_saved, *b);
-    helper.IterateHeap(HeapIterator::kNoFiltering);
-    CHECK(helper.a_found());
-    CHECK(helper.b_found());
+    v8::HandleScope inner_scope;
+    CompileRun("var v = {x: 42}");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() { return o.x; }"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
   }
-  // ...but is now unreachable.
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+// Test that we don't embed functions from foreign contexts into
+// optimized code.
+TEST(LeakGlobalContextViaFunction) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
   {
-    HeapIteratorTestHelper helper(a_saved, *b);
-    helper.IterateHeap(HeapIterator::kFilterUnreachable);
-    CHECK(!helper.a_found());
-    CHECK(helper.b_found());
+    v8::HandleScope inner_scope;
+    CompileRun("var v = function() { return 42; }");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f(x) { return x(); }"
+        "for (var i = 0; i < 10; ++i) f(o);"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f(o);");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
   }
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+TEST(LeakGlobalContextViaMapKeyed) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
+  {
+    v8::HandleScope inner_scope;
+    CompileRun("var v = [42, 43]");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() { return o[0]; }"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
+  }
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+TEST(LeakGlobalContextViaMapProto) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(4, NumberOfGlobalObjects());
+
+  {
+    v8::HandleScope inner_scope;
+    CompileRun("var v = { y: 42}");
+    v8::Local<v8::Value> v = ctx1->Global()->Get(v8_str("v"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), v);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() {"
+        "  var p = {x: 42};"
+        "  p.__proto__ = o;"
+        "  return p.x;"
+        "}"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
+  }
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(2, NumberOfGlobalObjects());
+  ctx2.Dispose();
+  HEAP->CollectAllAvailableGarbage();
+  CHECK_EQ(0, NumberOfGlobalObjects());
+}
+
+
+TEST(InstanceOfStubWriteBarrier) {
+  if (!i::FLAG_crankshaft) return;
+  i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+  i::FLAG_verify_heap = true;
+#endif
+  InitializeVM();
+  v8::HandleScope outer_scope;
+
+  {
+    v8::HandleScope scope;
+    CompileRun(
+        "function foo () { }"
+        "function mkbar () { return new (new Function(\"\")) (); }"
+        "function f (x) { return (x instanceof foo); }"
+        "function g () { f(mkbar()); }"
+        "f(new foo()); f(new foo());"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f(new foo()); g();");
+  }
+
+  IncrementalMarking* marking = HEAP->incremental_marking();
+  marking->Abort();
+  marking->Start();
+
+  Handle<JSFunction> f =
+      v8::Utils::OpenHandle(
+          *v8::Handle<v8::Function>::Cast(
+              v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+
+  CHECK(f->IsOptimized());
+
+  while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
+         !marking->IsStopped()) {
+    marking->Step(MB);
+  }
+
+  CHECK(marking->IsMarking());
+
+  // Discard any pending GC requests, otherwise we will get a GC when we
+  // enter the code below.
+  if (ISOLATE->stack_guard()->IsGCRequest()) {
+    ISOLATE->stack_guard()->Continue(GC_REQUEST);
+  }
+
+  {
+    v8::HandleScope scope;
+    v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();
+    v8::Handle<v8::Function> g =
+        v8::Handle<v8::Function>::Cast(global->Get(v8_str("g")));
+    g->Call(global, 0, NULL);
+  }
+
+  HEAP->incremental_marking()->set_should_hurry(true);
+  HEAP->CollectGarbage(OLD_POINTER_SPACE);
 }
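
TEST(InstanceOfStubWriteBarrier) drives the incremental marker by hand
instead of waiting for the heap to schedule it. The core pattern, lifted
from the test above (f is the Handle<JSFunction> of the optimized
function):

    IncrementalMarking* marking = HEAP->incremental_marking();
    marking->Abort();   // drop any marking cycle already in progress
    marking->Start();   // begin a fresh incremental cycle
    // Step in roughly megabyte-sized chunks until the optimized code
    // object has been marked black.
    while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
           !marking->IsStopped()) {
      marking->Step(MB);
    }
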
diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc
index 7360da5..5035f87 100644
--- a/test/cctest/test-lockers.cc
+++ b/test/cctest/test-lockers.cc
@@ -204,7 +204,11 @@
 
 // Run many threads all locking on the same isolate
 TEST(IsolateLockingStress) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   i::List<JoinableThread*> threads(kNThreads);
   v8::Isolate* isolate = v8::Isolate::New();
   for (int i = 0; i < kNThreads; i++) {
@@ -237,7 +241,7 @@
 
 // Run many threads each accessing its own isolate without locking
 TEST(MultithreadedParallelIsolates) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 10;
 #else
   const int kNThreads = 50;
@@ -275,7 +279,11 @@
 
 // Run many threads with nested locks
 TEST(IsolateNestedLocking) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   v8::Isolate* isolate = v8::Isolate::New();
   i::List<JoinableThread*> threads(kNThreads);
   for (int i = 0; i < kNThreads; i++) {
@@ -311,7 +319,7 @@
 
 // Run parallel threads that lock and access different isolates in parallel
 TEST(SeparateIsolatesLocksNonexclusive) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 50;
 #else
   const int kNThreads = 100;
@@ -385,7 +393,7 @@
 
 // Use unlocker inside of a Locker, multiple threads.
 TEST(LockerUnlocker) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 50;
 #else
   const int kNThreads = 100;
@@ -438,7 +446,7 @@
 
 // Use Unlocker inside two Lockers.
 TEST(LockTwiceAndUnlock) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
   const int kNThreads = 50;
 #else
   const int kNThreads = 100;
@@ -559,7 +567,11 @@
 
 // Locker inside an Unlocker inside a Locker.
 TEST(LockUnlockLockMultithreaded) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   v8::Isolate* isolate = v8::Isolate::New();
   Persistent<v8::Context> context;
   {
@@ -606,7 +618,11 @@
 
 // Locker inside an Unlocker inside a Locker for default isolate.
 TEST(LockUnlockLockDefaultIsolateMultithreaded) {
+#ifdef V8_TARGET_ARCH_MIPS
+  const int kNThreads = 50;
+#else
   const int kNThreads = 100;
+#endif
   Persistent<v8::Context> context;
   {
     v8::Locker locker_;
@@ -639,3 +655,68 @@
     isolate->Dispose();
   }
 }
+
+
+static const char* kSimpleExtensionSource =
+  "(function Foo() {"
+  "  return 4;"
+  "})() ";
+
+class IsolateGenesisThread : public JoinableThread {
+ public:
+  IsolateGenesisThread(int count, const char* extension_names[])
+    : JoinableThread("IsolateGenesisThread"),
+      count_(count),
+      extension_names_(extension_names)
+  {}
+
+  virtual void Run() {
+    v8::Isolate* isolate = v8::Isolate::New();
+    {
+      v8::Isolate::Scope isolate_scope(isolate);
+      CHECK(!i::Isolate::Current()->has_installed_extensions());
+      v8::ExtensionConfiguration extensions(count_, extension_names_);
+      v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
+      CHECK(i::Isolate::Current()->has_installed_extensions());
+      context.Dispose();
+    }
+    isolate->Dispose();
+  }
+ private:
+  int count_;
+  const char** extension_names_;
+};
+
+// Test installing extensions in separate isolates concurrently.
+// http://code.google.com/p/v8/issues/detail?id=1821
+TEST(ExtensionsRegistration) {
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+  const int kNThreads = 10;
+#else
+  const int kNThreads = 40;
+#endif
+  v8::RegisterExtension(new v8::Extension("test0",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test1",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test2",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test3",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test4",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test5",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test6",
+                                          kSimpleExtensionSource));
+  v8::RegisterExtension(new v8::Extension("test7",
+                                          kSimpleExtensionSource));
+  const char* extension_names[] = { "test0", "test1",
+                                    "test2", "test3", "test4",
+                                    "test5", "test6", "test7" };
+  i::List<JoinableThread*> threads(kNThreads);
+  for (int i = 0; i < kNThreads; i++) {
+    threads.Add(new IsolateGenesisThread(8, extension_names));
+  }
+  StartJoinAndDeleteThreads(threads);
+}
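
TEST(ExtensionsRegistration) exercises concurrent context genesis: every
IsolateGenesisThread creates its own isolate and builds a context
configured with the same eight extensions. Reduced to one thread and one
extension, the registration pattern is (kSimpleExtensionSource as defined
above):

    v8::RegisterExtension(new v8::Extension("test0", kSimpleExtensionSource));
    const char* names[] = { "test0" };
    v8::ExtensionConfiguration extensions(1, names);
    v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
    CHECK(i::Isolate::Current()->has_installed_extensions());
    context.Dispose();
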
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 72e663c..6f2324d 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -494,7 +494,7 @@
       "    (function a(j) { return function b() { return j; } })(100);\n"
       "})(this);");
   v8::V8::PauseProfiler();
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   LOGGER->StringEvent("test-logging-done", "");
 
   // Iterate the heap to find compiled functions; this will write to the log.
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index dcb51a0..cd3c490 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -44,21 +44,21 @@
 }
 
 
-TEST(MarkingStack) {
+TEST(MarkingDeque) {
   int mem_size = 20 * kPointerSize;
   byte* mem = NewArray<byte>(20*kPointerSize);
   Address low = reinterpret_cast<Address>(mem);
   Address high = low + mem_size;
-  MarkingStack s;
+  MarkingDeque s;
   s.Initialize(low, high);
 
   Address address = NULL;
-  while (!s.is_full()) {
-    s.Push(HeapObject::FromAddress(address));
+  while (!s.IsFull()) {
+    s.PushBlack(HeapObject::FromAddress(address));
     address += kPointerSize;
   }
 
-  while (!s.is_empty()) {
+  while (!s.IsEmpty()) {
     Address value = s.Pop()->address();
     address -= kPointerSize;
     CHECK_EQ(address, value);
@@ -78,7 +78,7 @@
   // from new space.
   FLAG_gc_global = true;
   FLAG_always_compact = true;
-  HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+  HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
 
   InitializeVM();
 
@@ -86,7 +86,7 @@
 
   // Allocate a fixed array in the new space.
   int array_size =
-      (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+      (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
       (kPointerSize * 4);
   Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
 
@@ -104,7 +104,7 @@
 
 
 TEST(NoPromotion) {
-  HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+  HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
 
   // Test the situation where some objects in new space are promoted to
   // the old space.
@@ -116,9 +116,12 @@
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 
   // Allocate a big Fixed array in the new space.
-  int size = (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
-      kPointerSize;
-  Object* obj = HEAP->AllocateFixedArray(size)->ToObjectChecked();
+  int max_size =
+      Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
+
+  int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
+  Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
+      ToObjectChecked();
 
   Handle<FixedArray> array(FixedArray::cast(obj));
 
@@ -228,6 +231,8 @@
 }
 
 
+// TODO(1600): compaction of the map space is temporarily removed from GC.
+#if 0
 static Handle<Map> CreateMap() {
   return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
 }
@@ -252,11 +257,11 @@
   // be able to trigger map compaction.
   // To give an additional chance to fail, try to force compaction which
   // should be impossible right now.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kForceCompactionMask);
   // And now map pointers should be encodable again.
   CHECK(HEAP->map_space()->MapPointersEncodable());
 }
-
+#endif
 
 static int gc_starts = 0;
 static int gc_ends = 0;
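
The first hunk of this file renames MarkingStack to MarkingDeque and its
accessors with it: is_full() and is_empty() become IsFull() and IsEmpty(),
and Push() becomes PushBlack(), making explicit that a pushed object
enters the worklist already marked black. A sketch of the renamed API,
with low, high and address as set up in the test above:

    MarkingDeque deque;
    deque.Initialize(low, high);        // backing store is [low, high)
    while (!deque.IsFull()) {
      deque.PushBlack(HeapObject::FromAddress(address));
      address += kPointerSize;
    }
    while (!deque.IsEmpty()) deque.Pop();
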
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index d98b675..6f394b6 100755
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -32,6 +32,7 @@
 #include "v8.h"
 
 #include "cctest.h"
+#include "compiler.h"
 #include "execution.h"
 #include "isolate.h"
 #include "parser.h"
@@ -63,9 +64,9 @@
     CHECK(static_cast<int>(sizeof(buffer)) >= length);
     {
       i::Utf8ToUC16CharacterStream stream(keyword, length);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Scanner scanner(&unicode_cache);
       // The scanner should parse 'let' as Token::LET for this test.
-      scanner.SetHarmonyBlockScoping(true);
+      scanner.SetHarmonyScoping(true);
       scanner.Initialize(&stream);
       CHECK_EQ(key_token.token, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -73,7 +74,7 @@
     // Removing characters will make keyword matching fail.
     {
       i::Utf8ToUC16CharacterStream stream(keyword, length - 1);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Scanner scanner(&unicode_cache);
       scanner.Initialize(&stream);
       CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -84,7 +85,7 @@
       memmove(buffer, keyword, length);
       buffer[length] = chars_to_append[j];
       i::Utf8ToUC16CharacterStream stream(buffer, length + 1);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Scanner scanner(&unicode_cache);
       scanner.Initialize(&stream);
       CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -94,7 +95,7 @@
       memmove(buffer, keyword, length);
       buffer[length - 1] = '_';
       i::Utf8ToUC16CharacterStream stream(buffer, length);
-      i::JavaScriptScanner scanner(&unicode_cache);
+      i::Scanner scanner(&unicode_cache);
       scanner.Initialize(&stream);
       CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -229,7 +230,7 @@
   CHECK_EQ(11, error_location.end_pos);
   // Should not crash.
   const char* message = pre_impl->BuildMessage();
-  i::Vector<const char*> args = pre_impl->BuildArgs();
+  i::Vector<const char*> args(pre_impl->BuildArgs());
   CHECK_GT(strlen(message), 0);
 }
 
@@ -257,13 +258,14 @@
         reinterpret_cast<const i::byte*>(program),
         static_cast<unsigned>(strlen(program)));
     i::CompleteParserRecorder log;
-    i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+    i::Scanner scanner(i::Isolate::Current()->unicode_cache());
     scanner.Initialize(&stream);
 
+    int flags = i::kAllowLazy | i::kAllowNativesSyntax;
     v8::preparser::PreParser::PreParseResult result =
         v8::preparser::PreParser::PreParseProgram(&scanner,
                                                   &log,
-                                                  true,
+                                                  flags,
                                                   stack_limit);
     CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
     i::ScriptDataImpl data(log.ExtractData());
@@ -272,6 +274,43 @@
 }
 
 
+TEST(StandAlonePreParserNoNatives) {
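+  // With natives syntax disallowed, %-calls must yield a preparse error.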
+  v8::V8::Initialize();
+
+  int marker;
+  i::Isolate::Current()->stack_guard()->SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+  const char* programs[] = {
+      "%ArgleBargle(glop);",
+      "var x = %_IsSmi(42);",
+      NULL
+  };
+
+  uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+  for (int i = 0; programs[i]; i++) {
+    const char* program = programs[i];
+    i::Utf8ToUC16CharacterStream stream(
+        reinterpret_cast<const i::byte*>(program),
+        static_cast<unsigned>(strlen(program)));
+    i::CompleteParserRecorder log;
+    i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+    scanner.Initialize(&stream);
+
+    // Flags don't allow natives syntax.
+    v8::preparser::PreParser::PreParseResult result =
+        v8::preparser::PreParser::PreParseProgram(&scanner,
+                                                  &log,
+                                                  i::kAllowLazy,
+                                                  stack_limit);
+    CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+    i::ScriptDataImpl data(log.ExtractData());
+    // Data contains syntax error.
+    CHECK(data.has_error());
+  }
+}
+
+
 TEST(RegressChromium62639) {
   v8::V8::Initialize();
 
@@ -310,10 +349,10 @@
       "try { } catch (e) { var foo = function () { /* first */ } }"
       "var bar = function () { /* second */ }";
 
-  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
-                                      static_cast<unsigned>(strlen(program)));
-  i::ScriptDataImpl* data =
-      i::ParserApi::PartialPreParse(&stream, NULL, false);
+  v8::HandleScope handles;
+  i::Handle<i::String> source(
+      FACTORY->NewStringFromAscii(i::CStrVector(program)));
+  i::ScriptDataImpl* data = i::ParserApi::PartialPreParse(source, NULL, false);
   CHECK(!data->HasError());
 
   data->Initialize();
@@ -356,7 +395,7 @@
       reinterpret_cast<const i::byte*>(*program),
       static_cast<unsigned>(kProgramSize));
   i::CompleteParserRecorder log;
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(&stream);
 
 
@@ -574,7 +613,7 @@
                        i::Token::Value* expected_tokens,
                        int skip_pos = 0,  // Zero means not skipping.
                        int skip_to = 0) {
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(stream);
 
   int i = 0;
@@ -655,7 +694,7 @@
   i::Utf8ToUC16CharacterStream stream(
        reinterpret_cast<const i::byte*>(re_source),
        static_cast<unsigned>(strlen(re_source)));
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   scanner.Initialize(&stream);
 
   i::Token::Value start = scanner.peek();
@@ -708,25 +747,170 @@
 }
 
 
-void TestParserSync(i::Handle<i::String> source, bool allow_lazy) {
+TEST(ScopePositions) {
+  // Test the parser for correctly setting the start and end positions
+  // of a scope. We check the scope positions of exactly one scope
+  // nested in the global scope of a program. 'inner_source' is the
+  // source code that determines the part of the source belonging
+  // to the nested scope. 'outer_prefix' and 'outer_suffix' are
+  // parts of the source that belong to the global scope.
+  struct SourceData {
+    const char* outer_prefix;
+    const char* inner_source;
+    const char* outer_suffix;
+    i::ScopeType scope_type;
+    i::LanguageMode language_mode;
+  };
+
+  const SourceData source_data[] = {
+    { "  with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "{\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({}) ", "statement", "\n"
+      "  more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  with ({})\n"
+      "    ", "statement;", "\n"
+      "  more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) { block; }", " more;",
+      i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) { block; }", "; more;",
+      i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) {\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  try {} catch ", "(e) { block; }", " finally { block; } more;",
+      i::CATCH_SCOPE, i::CLASSIC_MODE },
+    { "  start;\n"
+      "  ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  start;\n"
+      "  ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  start;\n"
+      "  ", "{\n"
+      "    let block;\n"
+      "  }", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  start;\n"
+      "  function fun", "(a,b) { infunction; }", " more;",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  start;\n"
+      "  function fun", "(a,b) {\n"
+      "    infunction;\n"
+      "  }", "\n"
+      "  more;", i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  (function fun", "(a,b) { infunction; }", ")();",
+      i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) {\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x) statement", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x = 1 ; x < 10; ++ x)\n"
+      "    statement;", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) { block; }", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) { block; }", "; more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) {\n"
+      "    block;\n"
+      "  }", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) statement;", " more;",
+      i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {}) statement", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { "  for ", "(let x in {})\n"
+      "    statement;", "\n"
+      "  more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+    { NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
+  };
+
+  v8::HandleScope handles;
+  v8::Persistent<v8::Context> context = v8::Context::New();
+  v8::Context::Scope context_scope(context);
+
+  int marker;
+  i::Isolate::Current()->stack_guard()->SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+  i::FLAG_harmony_scoping = true;
+
+  for (int i = 0; source_data[i].outer_prefix; i++) {
+    int kPrefixLen = i::StrLength(source_data[i].outer_prefix);
+    int kInnerLen = i::StrLength(source_data[i].inner_source);
+    int kSuffixLen = i::StrLength(source_data[i].outer_suffix);
+    int kProgramSize = kPrefixLen + kInnerLen + kSuffixLen;
+    i::Vector<char> program = i::Vector<char>::New(kProgramSize + 1);
+    int length = i::OS::SNPrintF(program, "%s%s%s",
+                                 source_data[i].outer_prefix,
+                                 source_data[i].inner_source,
+                                 source_data[i].outer_suffix);
+    CHECK(length == kProgramSize);
+
+    // Parse program source.
+    i::Handle<i::String> source(
+        FACTORY->NewStringFromAscii(i::CStrVector(program.start())));
+    i::Handle<i::Script> script = FACTORY->NewScript(source);
+    i::Parser parser(script, i::kAllowLazy | i::EXTENDED_MODE, NULL, NULL);
+    i::CompilationInfo info(script);
+    info.MarkAsGlobal();
+    info.SetLanguageMode(source_data[i].language_mode);
+    i::FunctionLiteral* function = parser.ParseProgram(&info);
+    CHECK(function != NULL);
+
+    // Check scope types and positions.
+    i::Scope* scope = function->scope();
+    CHECK(scope->is_global_scope());
+    CHECK_EQ(scope->start_position(), 0);
+    CHECK_EQ(scope->end_position(), kProgramSize);
+    CHECK_EQ(scope->inner_scopes()->length(), 1);
+
+    i::Scope* inner_scope = scope->inner_scopes()->at(0);
+    CHECK_EQ(inner_scope->type(), source_data[i].scope_type);
+    CHECK_EQ(inner_scope->start_position(), kPrefixLen);
+    // The end position of a token is one position after the last
+    // character belonging to that token.
+    CHECK_EQ(inner_scope->end_position(), kPrefixLen + kInnerLen);
+  }
+}
+
+
+void TestParserSync(i::Handle<i::String> source, int flags) {
   uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+  bool harmony_scoping = ((i::kLanguageModeMask & flags) == i::EXTENDED_MODE);
 
   // Preparse the data.
   i::CompleteParserRecorder log;
-  i::JavaScriptScanner scanner(i::Isolate::Current()->unicode_cache());
+  i::Scanner scanner(i::Isolate::Current()->unicode_cache());
   i::GenericStringUC16CharacterStream stream(source, 0, source->length());
+  scanner.SetHarmonyScoping(harmony_scoping);
   scanner.Initialize(&stream);
   v8::preparser::PreParser::PreParseResult result =
       v8::preparser::PreParser::PreParseProgram(
-          &scanner, &log, allow_lazy, stack_limit);
+          &scanner, &log, flags, stack_limit);
   CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
   i::ScriptDataImpl data(log.ExtractData());
 
   // Parse the data
   i::Handle<i::Script> script = FACTORY->NewScript(source);
-  i::Parser parser(script, false, NULL, NULL);
-  i::FunctionLiteral* function =
-      parser.ParseProgram(source, true, i::kNonStrictMode);
+  bool save_harmony_scoping = i::FLAG_harmony_scoping;
+  i::FLAG_harmony_scoping = harmony_scoping;
+  i::Parser parser(script, flags, NULL, NULL);
+  i::CompilationInfo info(script);
+  info.MarkAsGlobal();
+  i::FunctionLiteral* function = parser.ParseProgram(&info);
+  i::FLAG_harmony_scoping = save_harmony_scoping;
 
   i::String* type_string = NULL;
   if (function == NULL) {
@@ -779,6 +963,23 @@
 }
 
 
+void TestParserSyncWithFlags(i::Handle<i::String> source) {
+  static const int kFlagsCount = 6;
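+  // Cross lazy and non-lazy parsing with each of the three language modes.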
+  const int flags[kFlagsCount] = {
+    i::kNoParsingFlags | i::CLASSIC_MODE,
+    i::kNoParsingFlags | i::STRICT_MODE,
+    i::kNoParsingFlags | i::EXTENDED_MODE,
+    i::kAllowLazy | i::CLASSIC_MODE,
+    i::kAllowLazy | i::STRICT_MODE,
+    i::kAllowLazy | i::EXTENDED_MODE
+  };
+
+  for (int k = 0; k < kFlagsCount; ++k) {
+    TestParserSync(source, flags[k]);
+  }
+}
+
+
 TEST(ParserSync) {
   const char* context_data[][2] = {
     { "", "" },
@@ -876,8 +1077,7 @@
         CHECK(length == kProgramSize);
         i::Handle<i::String> source =
             FACTORY->NewStringFromAscii(i::CStrVector(program.start()));
-        TestParserSync(source, true);
-        TestParserSync(source, false);
+        TestParserSyncWithFlags(source);
       }
     }
   }
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index 76fd244..def829c 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -52,7 +52,7 @@
     CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
   }
   CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
   CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
   CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 89a9112..b778478 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -530,7 +530,7 @@
   typedef int Key;
   typedef int Value;
   static const int kNoKey;
-  static const int kNoValue;
+  static int NoValue() { return 0; }
   static inline int Compare(int a, int b) {
     if (a < b)
       return -1;
@@ -543,7 +543,6 @@
 
 
 const int TestConfig::kNoKey = 0;
-const int TestConfig::kNoValue = 0;
 
 
 static unsigned PseudoRandom(int i, int j) {
diff --git a/test/cctest/test-reloc-info.cc b/test/cctest/test-reloc-info.cc
index 5bdc4c3..e638201 100644
--- a/test/cctest/test-reloc-info.cc
+++ b/test/cctest/test-reloc-info.cc
@@ -34,7 +34,7 @@
 
 static void WriteRinfo(RelocInfoWriter* writer,
                        byte* pc, RelocInfo::Mode mode, intptr_t data) {
-  RelocInfo rinfo(pc, mode, data);
+  RelocInfo rinfo(pc, mode, data, NULL);
   writer->Write(&rinfo);
 }
 
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 8e85444..e426e7b 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -114,10 +114,6 @@
       ExternalReference(isolate->counters()->keyed_load_function_prototype());
   CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
            encoder.Encode(keyed_load_function_prototype.address()));
-  ExternalReference the_hole_value_location =
-      ExternalReference::the_hole_value_location(isolate);
-  CHECK_EQ(make_code(UNCLASSIFIED, 2),
-           encoder.Encode(the_hole_value_location.address()));
   ExternalReference stack_limit_address =
       ExternalReference::address_of_stack_limit(isolate);
   CHECK_EQ(make_code(UNCLASSIFIED, 4),
@@ -127,14 +123,15 @@
   CHECK_EQ(make_code(UNCLASSIFIED, 5),
            encoder.Encode(real_stack_limit_address.address()));
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  CHECK_EQ(make_code(UNCLASSIFIED, 15),
+  CHECK_EQ(make_code(UNCLASSIFIED, 16),
            encoder.Encode(ExternalReference::debug_break(isolate).address()));
 #endif  // ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(make_code(UNCLASSIFIED, 10),
            encoder.Encode(
                ExternalReference::new_space_start(isolate).address()));
   CHECK_EQ(make_code(UNCLASSIFIED, 3),
-           encoder.Encode(ExternalReference::roots_address(isolate).address()));
+           encoder.Encode(
+               ExternalReference::roots_array_start(isolate).address()));
 }
 
 
@@ -157,15 +154,13 @@
            decoder.Decode(
                make_code(STATS_COUNTER,
                          Counters::k_keyed_load_function_prototype)));
-  CHECK_EQ(ExternalReference::the_hole_value_location(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 2)));
   CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 4)));
   CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 5)));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(ExternalReference::debug_break(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 15)));
+           decoder.Decode(make_code(UNCLASSIFIED, 16)));
 #endif  // ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(ExternalReference::new_space_start(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 10)));
@@ -365,8 +360,8 @@
       Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
     }
   }
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   Object* raw_foo;
   {
@@ -490,7 +485,7 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of env.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
   Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -563,16 +558,20 @@
 TEST(LinearAllocation) {
   v8::V8::Initialize();
   int new_space_max = 512 * KB;
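+  // Reservations in paged spaces are capped at one page's usable object area.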
+  int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
+  int code_space_max = HEAP->code_space()->AreaSize();
 
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
+    size &= ~7;  // Round down to a multiple of 8.
     int new_space_size = (size < new_space_max) ? size : new_space_max;
+    int paged_space_size = (size < paged_space_max) ? size : paged_space_max;
     HEAP->ReserveSpace(
         new_space_size,
-        size,              // Old pointer space.
-        size,              // Old data space.
-        size,              // Code space.
-        size,              // Map space.
-        size,              // Cell space.
+        paged_space_size,  // Old pointer space.
+        paged_space_size,  // Old data space.
+        HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
+        HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+        HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
         size);             // Large object space.
     LinearAllocationScope linear_allocation_scope;
     const int kSmallFixedArrayLength = 4;
@@ -599,14 +598,14 @@
 
     Object* pointer_last = NULL;
     for (int i = 0;
-         i + kSmallFixedArraySize <= size;
+         i + kSmallFixedArraySize <= paged_space_size;
          i += kSmallFixedArraySize) {
       Object* obj = HEAP->AllocateFixedArray(kSmallFixedArrayLength,
                                              TENURED)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->old_pointer_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         pointer_last = NULL;
       }
@@ -618,13 +617,15 @@
     }
 
     Object* data_last = NULL;
-    for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
+    for (int i = 0;
+         i + kSmallStringSize <= paged_space_size;
+         i += kSmallStringSize) {
       Object* obj = HEAP->AllocateRawAsciiString(kSmallStringLength,
                                                  TENURED)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->old_data_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         data_last = NULL;
       }
@@ -636,13 +637,13 @@
     }
 
     Object* map_last = NULL;
-    for (int i = 0; i + kMapSize <= size; i += kMapSize) {
+    for (int i = 0; i + kMapSize <= paged_space_size; i += kMapSize) {
       Object* obj = HEAP->AllocateMap(JS_OBJECT_TYPE,
                                       42 * kPointerSize)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kMapSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->map_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         map_last = NULL;
       }
@@ -653,7 +654,7 @@
       map_last = obj;
     }
 
-    if (size > Page::kObjectAreaSize) {
+    if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Support for reserving space in large object space is not there yet,
       // but using an always-allocate scope is fine for now.
       AlwaysAllocateScope always;
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 0f22ce1..27f64b4 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,7 +32,9 @@
 
 using namespace v8::internal;
 
+#if 0
 static void VerifyRegionMarking(Address page_start) {
+#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
   Page* p = Page::FromAddress(page_start);
 
   p->SetRegionMarks(Page::kAllRegionsCleanMarks);
@@ -54,9 +56,13 @@
        addr += kPointerSize) {
     CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
   }
+#endif
 }
+#endif
 
 
+// TODO(gc): you can no longer allocate pages like this; the details are
+// hidden inside the memory allocator.
+#if 0
 TEST(Page) {
   byte* mem = NewArray<byte>(2*Page::kPageSize);
   CHECK(mem != NULL);
@@ -89,6 +95,7 @@
 
   DeleteArray(mem);
 }
+#endif
 
 
 namespace v8 {
@@ -122,62 +129,46 @@
   Isolate* isolate = Isolate::Current();
   isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
+  CHECK(isolate->heap()->ConfigureHeapDefault());
+
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->Setup(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
-  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
 
+  int total_pages = 0;
   OldSpace faked_space(heap,
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
                        NOT_EXECUTABLE);
-  int total_pages = 0;
-  int requested = MemoryAllocator::kPagesPerChunk;
-  int allocated;
-  // If we request n pages, we should get n or n - 1.
-  Page* first_page = memory_allocator->AllocatePages(
-      requested, &allocated, &faked_space);
-  CHECK(first_page->is_valid());
-  CHECK(allocated == requested || allocated == requested - 1);
-  total_pages += allocated;
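+  // Pages are now allocated one at a time and linked into the space by hand.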
+  Page* first_page =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
 
-  Page* last_page = first_page;
-  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
-    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
-    last_page = p;
+  first_page->InsertAfter(faked_space.anchor()->prev_page());
+  CHECK(first_page->is_valid());
+  CHECK(first_page->next_page() == faked_space.anchor());
+  total_pages++;
+
+  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
   }
 
-  // Again, we should get n or n - 1 pages.
+  // Allocate a second page and link it in after the first.
-  Page* others = memory_allocator->AllocatePages(
-      requested, &allocated, &faked_space);
-  CHECK(others->is_valid());
-  CHECK(allocated == requested || allocated == requested - 1);
-  total_pages += allocated;
-
-  memory_allocator->SetNextPage(last_page, others);
+  Page* other =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  CHECK(other->is_valid());
+  total_pages++;
+  other->InsertAfter(first_page);
   int page_count = 0;
-  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
-    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
+  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
     page_count++;
   }
   CHECK(total_pages == page_count);
 
   Page* second_page = first_page->next_page();
   CHECK(second_page->is_valid());
-
-  // Freeing pages at the first chunk starting at or after the second page
-  // should free the entire second chunk.  It will return the page it was passed
-  // (since the second page was in the first chunk).
-  Page* free_return = memory_allocator->FreePages(second_page);
-  CHECK(free_return == second_page);
-  memory_allocator->SetNextPage(first_page, free_return);
-
-  // Freeing pages in the first chunk starting at the first page should free
-  // the first chunk and return an invalid page.
-  Page* invalid_page = memory_allocator->FreePages(first_page);
-  CHECK(!invalid_page->is_valid());
-
+  memory_allocator->Free(first_page);
+  memory_allocator->Free(second_page);
   memory_allocator->TearDown();
   delete memory_allocator;
 }
@@ -196,17 +187,14 @@
 
   NewSpace new_space(heap);
 
-  void* chunk =
-      memory_allocator->ReserveInitialChunk(4 * heap->ReservedSemiSpaceSize());
-  CHECK(chunk != NULL);
-  Address start = RoundUp(static_cast<Address>(chunk),
-                          2 * heap->ReservedSemiSpaceSize());
-  CHECK(new_space.Setup(start, 2 * heap->ReservedSemiSpaceSize()));
+  CHECK(new_space.Setup(HEAP->ReservedSemiSpaceSize(),
+                        HEAP->ReservedSemiSpaceSize()));
   CHECK(new_space.HasBeenSetup());
 
-  while (new_space.Available() >= Page::kMaxHeapObjectSize) {
+  while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
     Object* obj =
-        new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+        new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+        ToObjectUnchecked();
     CHECK(new_space.Contains(HeapObject::cast(obj)));
   }
 
@@ -233,16 +221,10 @@
                              NOT_EXECUTABLE);
   CHECK(s != NULL);
 
-  void* chunk = memory_allocator->ReserveInitialChunk(
-      4 * heap->ReservedSemiSpaceSize());
-  CHECK(chunk != NULL);
-  Address start = static_cast<Address>(chunk);
-  size_t size = RoundUp(start, 2 * heap->ReservedSemiSpaceSize()) - start;
-
-  CHECK(s->Setup(start, size));
+  CHECK(s->Setup());
 
   while (s->Available() > 0) {
-    s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+    s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
   }
 
   s->TearDown();
@@ -258,14 +240,12 @@
   LargeObjectSpace* lo = HEAP->lo_space();
   CHECK(lo != NULL);
 
-  Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
   int lo_size = Page::kPageSize;
 
-  Object* obj = lo->AllocateRaw(lo_size)->ToObjectUnchecked();
+  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
   CHECK(obj->IsHeapObject());
 
   HeapObject* ho = HeapObject::cast(obj);
-  ho->set_map(faked_map);
 
   CHECK(lo->Contains(HeapObject::cast(obj)));
 
@@ -275,14 +255,13 @@
 
   while (true) {
     intptr_t available = lo->Available();
-    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size);
+    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
       if (!maybe_obj->ToObject(&obj)) break;
     }
-    HeapObject::cast(obj)->set_map(faked_map);
     CHECK(lo->Available() < available);
   };
 
   CHECK(!lo->IsEmpty());
 
-  CHECK(lo->AllocateRaw(lo_size)->IsFailure());
+  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
 }
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 55c2141..93f7588 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // Check that we can traverse very deep stacks of ConsStrings using
 // StringInputBuffer.  Check that Get(int) works on very deep stacks
@@ -502,6 +502,35 @@
 }
 
 
+class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+ public:
+  explicit AsciiVectorResource(i::Vector<const char> vector)
+      : data_(vector) {}
+  virtual ~AsciiVectorResource() {}
+  virtual size_t length() const { return data_.length(); }
+  virtual const char* data() const { return data_.start(); }
+ private:
+  i::Vector<const char> data_;
+};
+
+
+TEST(SliceFromExternal) {
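+  // A substring of an external string should become a SlicedString whose
+  // parent is still the external string.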
+  FLAG_string_slices = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  AsciiVectorResource resource(
+      i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
+  Handle<String> string = FACTORY->NewExternalStringFromAscii(&resource);
+  CHECK(string->IsExternalString());
+  Handle<String> slice = FACTORY->NewSubString(string, 1, 25);
+  CHECK(slice->IsSlicedString());
+  CHECK(string->IsExternalString());
+  CHECK_EQ(SlicedString::cast(*slice)->parent(), *string);
+  CHECK(SlicedString::cast(*slice)->parent()->IsExternalString());
+  CHECK(slice->IsFlat());
+}
+
+
 TEST(TrivialSlice) {
   // This tests whether a slice that contains the entire parent string
   // actually creates a new string (it should not).
diff --git a/test/cctest/test-threads.cc b/test/cctest/test-threads.cc
index 985b9e5..713d1e8 100644
--- a/test/cctest/test-threads.cc
+++ b/test/cctest/test-threads.cc
@@ -63,7 +63,7 @@
 static Turn turn = FILL_CACHE;
 
 
-class ThreadA: public v8::internal::Thread {
+class ThreadA : public v8::internal::Thread {
  public:
   ThreadA() : Thread("ThreadA") { }
   void Run() {
@@ -99,7 +99,7 @@
 };
 
 
-class ThreadB: public v8::internal::Thread {
+class ThreadB : public v8::internal::Thread {
  public:
   ThreadB() : Thread("ThreadB") { }
   void Run() {
@@ -111,7 +111,7 @@
           v8::Context::Scope context_scope(v8::Context::New());
 
           // Clear the caches by forcing major GC.
-          HEAP->CollectAllGarbage(false);
+          HEAP->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
           turn = SECOND_TIME_FILL_CACHE;
           break;
         }
@@ -190,3 +190,19 @@
     delete threads[i];
   }
 }
+
+
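+// A thread that joins itself should return instead of deadlocking.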
+class ThreadC : public v8::internal::Thread {
+ public:
+  ThreadC() : Thread("ThreadC") { }
+  void Run() {
+    Join();
+  }
+};
+
+
+TEST(ThreadJoinSelf) {
+  ThreadC thread;
+  thread.Start();
+  thread.Join();
+}
diff --git a/test/cctest/test-weakmaps.cc b/test/cctest/test-weakmaps.cc
index db4db25..56d5936 100644
--- a/test/cctest/test-weakmaps.cc
+++ b/test/cctest/test-weakmaps.cc
@@ -50,7 +50,7 @@
                            Handle<JSObject> key,
                            int value) {
   Handle<ObjectHashTable> table = PutIntoObjectHashTable(
-      Handle<ObjectHashTable>(weakmap->table()),
+      Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
       Handle<JSObject>(JSObject::cast(*key)),
       Handle<Smi>(Smi::FromInt(value)));
   weakmap->set_table(*table);
@@ -85,13 +85,14 @@
     v8::HandleScope scope;
     PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), 23);
   }
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
 
   // Force a full GC.
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(0, NumberOfWeakCalls);
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
   // Make the global reference to the key weak.
   {
@@ -107,12 +108,14 @@
   // weak references whereas the second one will also clear weak maps.
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(0, weakmap->table()->NumberOfElements());
-  CHECK_EQ(1, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      1, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 }
 
 
@@ -122,7 +125,7 @@
   Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
 
   // Check initial capacity.
-  CHECK_EQ(32, weakmap->table()->Capacity());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 
   // Fill up weak map to trigger capacity change.
   {
@@ -135,15 +138,17 @@
   }
 
   // Check increased capacity.
-  CHECK_EQ(128, weakmap->table()->Capacity());
+  CHECK_EQ(128, ObjectHashTable::cast(weakmap->table())->Capacity());
 
   // Force a full GC.
-  CHECK_EQ(32, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(0, weakmap->table()->NumberOfElements());
-  CHECK_EQ(32, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      32, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
   // Check shrunk capacity.
-  CHECK_EQ(32, weakmap->table()->Capacity());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 }
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index d095a24..bf3ee8b 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -41,16 +41,6 @@
 # We are compatible with Safari and Firefox.
 chapter11/11.1/11.1.5: UNIMPLEMENTED
 
-# We do not have a global object called 'global' as required by tests.
-chapter15/15.1: FAIL_OK
-
-# NaN is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
-# Infinity is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
-# undefined is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
-
 # Our Function object has an "arguments" property which is used as a
 # non-property in the test.
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
@@ -106,9 +96,6 @@
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
 
-# We do not implement all methods on RegExp.
-chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-13: FAIL
-
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-14: FAIL_OK
 
@@ -196,27 +183,6 @@
 # have no effect on the actual array on which reduceRight is called.
 chapter15/15.4/15.4.4/15.4.4.22/15.4.4.22-9-7: FAIL_OK
 
-# We do not correctly recognize \uFEFF as whitespace
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-10: FAIL
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-18: FAIL
-chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-34: FAIL
-
-# RegExp.prototype is not of type RegExp - we are bug compatible with JSC.
-chapter15/15.10/15.10.6/15.10.6: FAIL_OK
-
-# We do not have the properties of a RegExp instance on RegExp.prototype.
-# The spec says we should - but we are currently bug compatible with JSC.
-chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
-
 ##############################################################################
 # Unimplemented parts of strict mode
 # Setting expectations to fail only so that the tests trigger as soon as
diff --git a/test/mjsunit/apply.js b/test/mjsunit/apply.js
index c166110..413ee93 100644
--- a/test/mjsunit/apply.js
+++ b/test/mjsunit/apply.js
@@ -190,3 +190,10 @@
     "moreseper-prime");
 
 delete(Array.prototype["1"]);
+
+// Check correct handling of non-array argument lists.
+assertSame(this, f0.apply(this, {}), "non-array-1");
+assertSame(this, f0.apply(this, { length:1 }), "non-array-2");
+assertEquals(void 0, f1.apply(this, { length:1 }), "non-array-3");
+assertEquals(void 0, f1.apply(this, { 0:"foo" }), "non-array-4");
+assertEquals("foo", f1.apply(this, { length:1, 0:"foo" }), "non-array-5");
diff --git a/test/mjsunit/array-literal-transitions.js b/test/mjsunit/array-literal-transitions.js
new file mode 100644
index 0000000..321340c
--- /dev/null
+++ b/test/mjsunit/array-literal-transitions.js
@@ -0,0 +1,125 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Test element kind of objects.
+// Since --smi-only-arrays affects builtins, its default setting at compile
+// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
+// by default, only a no-snapshot build actually has smi-only arrays enabled
+// in this test case.  Depending on whether smi-only arrays are actually
+// enabled, this test takes the appropriate code path to check smi-only arrays.
+
+var support_smi_only_arrays = %HasFastSmiOnlyElements(new Array());
+
+// IC and Crankshaft support for smi-only elements in dynamic array literals.
+function get(foo) { return foo; }  // Used to generate dynamic values.
+
+function array_literal_test() {
+  var a0 = [1, 2, 3];
+  assertTrue(%HasFastSmiOnlyElements(a0));
+  var a1 = [get(1), get(2), get(3)];
+  assertTrue(%HasFastSmiOnlyElements(a1));
+
+  var b0 = [1, 2, get("three")];
+  assertTrue(%HasFastElements(b0));
+  var b1 = [get(1), get(2), get("three")];
+  assertTrue(%HasFastElements(b1));
+
+  var c0 = [1, 2, get(3.5)];
+  assertTrue(%HasFastDoubleElements(c0));
+  assertEquals(3.5, c0[2]);
+  assertEquals(2, c0[1]);
+  assertEquals(1, c0[0]);
+
+  var c1 = [1, 2, 3.5];
+  assertTrue(%HasFastDoubleElements(c1));
+  assertEquals(3.5, c1[2]);
+  assertEquals(2, c1[1]);
+  assertEquals(1, c1[0]);
+
+  var c2 = [get(1), get(2), get(3.5)];
+  assertTrue(%HasFastDoubleElements(c2));
+  assertEquals(3.5, c2[2]);
+  assertEquals(2, c2[1]);
+  assertEquals(1, c2[0]);
+
+  var object = new Object();
+  var d0 = [1, 2, object];
+  assertTrue(%HasFastElements(d0));
+  assertEquals(object, d0[2]);
+  assertEquals(2, d0[1]);
+  assertEquals(1, d0[0]);
+
+  var e0 = [1, 2, 3.5];
+  assertTrue(%HasFastDoubleElements(e0));
+  assertEquals(3.5, e0[2]);
+  assertEquals(2, e0[1]);
+  assertEquals(1, e0[0]);
+
+  var f0 = [1, 2, [1, 2]];
+  assertTrue(%HasFastElements(f0));
+  assertEquals([1,2], f0[2]);
+  assertEquals(2, f0[1]);
+  assertEquals(1, f0[0]);
+}
+
+if (support_smi_only_arrays) {
+  for (var i = 0; i < 3; i++) {
+    array_literal_test();
+  }
+  %OptimizeFunctionOnNextCall(array_literal_test);
+  array_literal_test();
+
+  function test_large_literal() {
+
+    function d() {
+      gc();
+      return 2.5;
+    }
+
+    function o() {
+      gc();
+      return new Object();
+    }
+
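+    // gc() runs while the literal is still being built, so the boilerplate
+    // must survive a collection in mid-construction.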
+    large =
+        [ 0, 1, 2, 3, 4, 5, d(), d(), d(), d(), d(), d(), o(), o(), o(), o() ];
+    assertFalse(%HasDictionaryElements(large));
+    assertFalse(%HasFastSmiOnlyElements(large));
+    assertFalse(%HasFastDoubleElements(large));
+    assertTrue(%HasFastElements(large));
+    assertEquals(large,
+                 [0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5,
+                  new Object(), new Object(), new Object(), new Object()]);
+  }
+
+  for (var i = 0; i < 3; i++) {
+    test_large_literal();
+  }
+  %OptimizeFunctionOnNextCall(test_large_literal);
+  test_large_literal();
+}
diff --git a/test/mjsunit/array-tostring.js b/test/mjsunit/array-tostring.js
new file mode 100644
index 0000000..6708657
--- /dev/null
+++ b/test/mjsunit/array-tostring.js
@@ -0,0 +1,159 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Array's toString should call the object's own join method, if one exists and
+// is callable. Otherwise, just use the original Object.toString function.
+
+var success = "[test success]";
+var expectedThis;
+function testJoin() {
+  assertEquals(0, arguments.length);
+  assertSame(expectedThis, this);
+  return success;
+}
+
+
+// On an Array object.
+
+// Default case.
+var a1 = [1, 2, 3];
+assertEquals(a1.join(), a1.toString());
+
+// Non-standard "join" function is called correctly.
+var a2 = [1, 2, 3];
+a2.join = testJoin;
+expectedThis = a2;
+assertEquals(success, a2.toString());
+
+// Non-callable join function is ignored and Object.prototype.toString is
+// used instead.
+var a3 = [1, 2, 3];
+a3.join = "not callable";
+assertEquals("[object Array]", a3.toString());
+
+// Non-existing join function is treated same as non-callable.
+var a4 = [1, 2, 3];
+a4.__proto__ = { toString: Array.prototype.toString };
+// No join on Array.
+assertEquals("[object Array]", a4.toString());
+
+
+// On a non-Array object.
+
+// Default looks-like-an-array case.
+var o1 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: Array.prototype.join};
+assertEquals(o1.join(), o1.toString());
+
+
+// Non-standard join is called correctly.
+// Check that we don't read, e.g., length before calling join.
+var o2 = {toString : Array.prototype.toString,
+          join: testJoin,
+          get length() { assertUnreachable(); },
+          get 0() { assertUnreachable(); }};
+expectedThis = o2;
+assertEquals(success, o2.toString());
+
+// Non-standard join is called even if it looks like an array.
+var o3 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: testJoin};
+expectedThis = o3;
+assertEquals(success, o3.toString());
+
+// Non-callable join works same as for Array.
+var o4 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: "not callable"};
+assertEquals("[object Object]", o4.toString());
+
+
+// Non-existing join works same as for Array.
+var o5 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString
+          /* no join */};
+assertEquals("[object Object]", o5.toString());
+
+
+// Test that ToObject is called before getting "join", so the instance
+// that "join" is read from is the same one passed as receiver later.
+var called_before = false;
+expectedThis = null;
+Object.defineProperty(Number.prototype, "join", {get: function() {
+            assertFalse(called_before);
+            called_before = true;
+            expectedThis = this;
+            return testJoin;
+        }});
+Number.prototype.arrayToString = Array.prototype.toString;
+assertEquals(success, (42).arrayToString());
+
+// ----------------------------------------------------------
+// Testing Array.prototype.toLocaleString
+
+// Ensure that it never uses Array.prototype.toString for anything.
+Array.prototype.toString = function() { assertUnreachable(); };
+
+// Default case.
+var la1 = [1, [2, 3], 4];
+assertEquals("1,2,3,4", la1.toLocaleString());
+
+// Used on a string (which looks like an array of characters).
+String.prototype.toLocaleString = Array.prototype.toLocaleString;
+assertEquals("1,2,3,4", "1234".toLocaleString());
+
+// If toLocaleString of element is not callable, throw a TypeError.
+var la2 = [1, {toLocaleString: "not callable"}, 3];
+assertThrows(function() { la2.toLocaleString(); }, TypeError);
+
+// If toLocaleString of element is callable, call it.
+var la3 = [1, {toLocaleString: function() { return "XX";}}, 3];
+assertEquals("1,XX,3", la3.toLocaleString());
+
+// Omitted elements, as well as undefined and null, become empty string.
+var la4 = [1, null, 3, undefined, 5,, 7];
+assertEquals("1,,3,,5,,7", la4.toLocaleString());
+
+
+// ToObject is called first and the same object is being used for the
+// rest of the operations.
+Object.defineProperty(Number.prototype, "length", {
+    get: function() {
+      expectedThis = this;
+      return 3;
+    }});
+for (var i = 0; i < 3; i++) {
+  Object.defineProperty(Number.prototype, i, {
+      get: function() {
+        assertEquals(expectedThis, this);
+        return +this;
+      }});
+}
+Number.prototype.arrayToLocaleString = Array.prototype.toLocaleString;
+assertEquals("42,42,42", (42).arrayToLocaleString());
\ No newline at end of file
diff --git a/test/mjsunit/assert-opt-and-deopt.js b/test/mjsunit/assert-opt-and-deopt.js
index c9adb5b..51cb99a 100644
--- a/test/mjsunit/assert-opt-and-deopt.js
+++ b/test/mjsunit/assert-opt-and-deopt.js
@@ -150,11 +150,6 @@
 
 f(1);
 
-tracker.AssertOptCount(f, 0);
-tracker.AssertIsOptimized(f, false);
-tracker.AssertDeoptHappened(f, false);
-tracker.AssertDeoptCount(f, 0);
-
 %OptimizeFunctionOnNextCall(f);
 f(1);
 
@@ -172,6 +167,7 @@
 
 // Let's trigger optimization for another type.
 for (var i = 0; i < 5; i++) f("a");
+
 %OptimizeFunctionOnNextCall(f);
 f("b");
 
diff --git a/test/mjsunit/bugs/bug-618.js b/test/mjsunit/bugs/bug-618.js
index ae84326..0513f87 100644
--- a/test/mjsunit/bugs/bug-618.js
+++ b/test/mjsunit/bugs/bug-618.js
@@ -42,4 +42,4 @@
 
 assertEquals(23, new C().x);
 C.prototype.__defineSetter__('x', function(value) { this.y = 23; });
-assertEquals(void 0, new C().x));
+assertEquals(void 0, new C().x);
diff --git a/test/mjsunit/bugs/harmony/debug-blockscopes.js b/test/mjsunit/bugs/harmony/debug-blockscopes.js
index a407c53..fda32eb 100644
--- a/test/mjsunit/bugs/harmony/debug-blockscopes.js
+++ b/test/mjsunit/bugs/harmony/debug-blockscopes.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --harmony-block-scoping
+// Flags: --expose-debug-as debug --harmony-scoping
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
diff --git a/test/mjsunit/compiler/compare.js b/test/mjsunit/compiler/compare.js
index 3f96087..460b0ab 100644
--- a/test/mjsunit/compiler/compare.js
+++ b/test/mjsunit/compiler/compare.js
@@ -83,9 +83,9 @@
 }
 
 TestNonPrimitive("xy", MaxLT);
-TestNonPrimitive("yx", MaxLE);
+TestNonPrimitive("xy", MaxLE);
 TestNonPrimitive("xy", MaxGE);
-TestNonPrimitive("yx", MaxGT);
+TestNonPrimitive("xy", MaxGT);
 
 // Test compare in case of aliased registers.
 function CmpX(x) { if (x == x) return 42; }
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/inline-context-slots.js
similarity index 78%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/compiler/inline-context-slots.js
index 2502b53..d0e907b 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/inline-context-slots.js
@@ -25,22 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Test inlining of functions with context slots.
 
-var e = new Error();
-assertEquals('Error', e + '');
+// Flags: --allow-natives-syntax
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+// Caller/callee without a local context.
+
+(function() {
+  var X = 5;
+  var Y = 10;
+  function F() {}
+  F.prototype.max = function() {
+    return X > Y ? X : Y;
+  }
+  F.prototype.run = function() {
+    return this.max();
+  }
+  var f = new F();
+  for (var i=0; i<5; i++) f.run();
+  %OptimizeFunctionOnNextCall(f.run);
+  assertEquals(10, f.run());
+})();
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/lazy-const-lookup.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/lazy-const-lookup.js
index d3f2e35..b4f15a1 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/lazy-const-lookup.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
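+// An inner function compiled lazily (and later optimized) must still read
+// the initialized const from its enclosing scope.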
+function outer() {
+  const x = 1;
+  function inner() {
+    return x;
+  }
+  inner();
+  %OptimizeFunctionOnNextCall(inner);
+  inner();
+}
 
-assertThrows('eval(delete eval)');
+outer();
 
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/compiler/regress-96989.js
similarity index 81%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/compiler/regress-96989.js
index d3f2e35..aedeb24 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/compiler/regress-96989.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
 
-// See http://code.google.com/p/v8/issues/detail?id=221
+// Flags: --allow-natives-syntax
 
-assertThrows('eval(delete eval)');
+// Test correct handling of uninitialized const.
 
+function test() {
+  for (var i = 41; i < 42; i++) {
+    var c = t ^ i;
+  }
+  const t;
+  return c;
+}
+
+for (var i=0; i<10; i++) test();
+%OptimizeFunctionOnNextCall(test);
+assertEquals(41, test());
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/regress-deopt-call-as-function.js
similarity index 66%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/compiler/regress-deopt-call-as-function.js
index 2502b53..c408096 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/regress-deopt-call-as-function.js
@@ -25,22 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Test deoptimization after inlined call.
 
-var e = new Error();
-assertEquals('Error', e + '');
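+// bar uses try/finally, so it is never inlined itself; only foo is inlined.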
+function bar(a, b) {try { return a; } finally { } }
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+function test_context() {
+  function foo(x) { return 42; }
+  var s, t;
+  for (var i = 0x7fff0000; i < 0x80000000; i++) {
+    bar(t = foo(i) ? bar(42 + i - i) : bar(0), s = i + t);
+  }
+  return s;
+}
+assertEquals(0x7fffffff + 42, test_context());
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+
+function value_context() {
+  function foo(x) { return 42; }
+  var s, t;
+  for (var i = 0x7fff0000; i < 0x80000000; i++) {
+    bar(t = foo(i), s = i + t);
+  }
+  return s;
+}
+assertEquals(0x7fffffff + 42, value_context());
+
+
+function effect_context() {
+  function foo(x) { return 42; }
+  var s, t;
+  for (var i = 0x7fff0000; i < 0x80000000; i++) {
+    bar(foo(i), s = i + 42);
+  }
+  return s;
+}
+assertEquals(0x7fffffff + 42, effect_context());
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/regress-inline-callfunctionstub.js
similarity index 80%
rename from test/mjsunit/cyclic-error-to-string.js
rename to test/mjsunit/compiler/regress-inline-callfunctionstub.js
index 2502b53..a39d26d 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/regress-inline-callfunctionstub.js
@@ -25,22 +25,22 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax
 
-var e = new Error();
-assertEquals('Error', e + '');
+// Test inlining of calls-as-function two levels deep.
+function f() { return 42; }
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+var o = {g : function () { return f(); } }
+function main(func) {
+  var v=0;
+  for (var i=0; i<1; i++) {
+    if (func()) v = 42;
+  }
+}
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+main(o.g);
+main(o.g);
+main(o.g);
+%OptimizeFunctionOnNextCall(main);
+main(o.g);
+
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/compiler/strict-recompile.js
similarity index 77%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/compiler/strict-recompile.js
index 2502b53..96e8bca 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/compiler/strict-recompile.js
@@ -25,22 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax
 
-var e = new Error();
-assertEquals('Error', e + '');
+function foo() {
+  try {
+    var o = {};
+    Object.defineProperty(o, 'x', {value: 12, writable: false});
+    o.x = 13;
+  } catch(e) {
+    return true;
+  }
+  return false;
+}
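+
+// In sloppy mode the write to the read-only property fails silently, so foo
+// returns false.  The strict clone produced by do_eval below must keep
+// throwing (and thus returning true) even after it has been optimized.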
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+assertFalse(foo());
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+function do_eval(str) {
+  "use strict";
+  return eval(str);
+}
+
+var eval_foo = do_eval('(' + foo + ')');
+for (var i = 0; i < 5; i++) assertTrue(eval_foo());
+%OptimizeFunctionOnNextCall(eval_foo);
+assertTrue(eval_foo());
diff --git a/test/mjsunit/const-redecl.js b/test/mjsunit/const-redecl.js
index 9459708..c0b97e6 100644
--- a/test/mjsunit/const-redecl.js
+++ b/test/mjsunit/const-redecl.js
@@ -98,7 +98,8 @@
   var msg = s;
   if (opt_e) { e = opt_e; msg += "; " + opt_e; }
   assertEquals(expected, TestLocal(s,e), "local:'" + msg + "'");
-  assertEquals(expected, TestGlobal(s,e), "global:'" + msg + "'");
+  // Redeclarations of global consts do not throw, they are silently ignored.
+  assertEquals(42, TestGlobal(s, 42), "global:'" + msg + "'");
   assertEquals(expected, TestContext(s,e), "context:'" + msg + "'");
 }
 
@@ -218,3 +219,62 @@
 // Test that const inside with behaves correctly.
 TestAll(87, "with ({x:42}) { const x = 87; }", "x");
 TestAll(undefined, "with ({x:42}) { const x; }", "x");
+
+
+// Additional tests for how various combinations of re-declarations affect
+// the values of the var/const in question.
+try {
+  eval("var undefined;");
+} catch (ex) {
+  assertUnreachable("undefined (1) has thrown");
+}
+
+var original_undef = undefined;
+var undefined = 1;  // Should be silently ignored.
+assertEquals(original_undef, undefined, "undefined got overwritten");
+undefined = original_undef;
+
+var a; const a; const a = 1;
+assertEquals(1, a, "a has wrong value");
+a = 2;
+assertEquals(2, a, "a should be writable");
+
+var b = 1; const b = 2;
+assertEquals(2, b, "b has wrong value");
+
+var c = 1; const c = 2; const c = 3;
+assertEquals(3, c, "c has wrong value");
+
+const d = 1; const d = 2;
+assertEquals(1, d, "d has wrong value");
+
+const e = 1; var e = 2;
+assertEquals(1, e, "e has wrong value");
+
+const f = 1; const f;
+assertEquals(1, f, "f has wrong value");
+
+var g; const g = 1;
+assertEquals(1, g, "g has wrong value");
+g = 2;
+assertEquals(2, g, "g should be writable");
+
+const h; var h = 1;
+assertEquals(undefined, h, "h has wrong value");
+
+eval("Object.defineProperty(this, 'i', { writable: true });"
+   + "const i = 7;"
+   + "assertEquals(7, i, \"i has wrong value\");");
+
+var global = this;
+assertThrows(function() {
+  Object.defineProperty(global, 'j', { writable: true })
+}, TypeError);
+const j = 2;  // This is what makes the function above throw, because the
+// const declaration gets hoisted and makes the property non-configurable.
+assertEquals(2, j, "j has wrong value");
+
+var k = 1; const k;
+// You could argue about the expected result here. For now, the winning
+// argument is that "const k;" is equivalent to "const k = undefined;".
+assertEquals(undefined, k, "k has wrong value");
diff --git a/test/mjsunit/date.js b/test/mjsunit/date.js
index a7f6cfa..fa43cbb 100644
--- a/test/mjsunit/date.js
+++ b/test/mjsunit/date.js
@@ -157,7 +157,7 @@
 // Test that -0 is treated correctly in MakeDay.
 var d = new Date();
 assertDoesNotThrow("d.setDate(-0)");
-assertDoesNotThrow("new Date(-0, -0, -0, -0, -0, -0. -0)");
+assertDoesNotThrow("new Date(-0, -0, -0, -0, -0, -0, -0)");
 assertDoesNotThrow("new Date(0x40000000, 0x40000000, 0x40000000," +
                    "0x40000000, 0x40000000, 0x40000000, 0x40000000)")
 assertDoesNotThrow("new Date(-0x40000001, -0x40000001, -0x40000001," +
@@ -178,7 +178,7 @@
 assertTrue(isNaN(Date.UTC(-271821, 3, 19)));
 
 
-// Test creation of large date values.
+// Test creation with large date values.
 d = new Date(1969, 12, 1, 99999999999);
 assertTrue(isNaN(d.getTime()));
 d = new Date(1969, 12, 1, -99999999999);
@@ -188,6 +188,12 @@
 d = new Date(1969, 12, 1, -Infinity);
 assertTrue(isNaN(d.getTime()));
 
+
+// Test creation with obscure date values.
+assertEquals(8640000000000000, Date.UTC(1970, 0, 1 + 100000001, -24));
+assertEquals(-8640000000000000, Date.UTC(1970, 0, 1 - 100000001, 24));
+
+
 // Parsing ES5 ISO-8601 dates.
 // When TZ is omitted, it defaults to 'Z' meaning UTC.
 
diff --git a/test/mjsunit/debug-break-inline.js b/test/mjsunit/debug-break-inline.js
new file mode 100644
index 0000000..4418fa8
--- /dev/null
+++ b/test/mjsunit/debug-break-inline.js
@@ -0,0 +1,100 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// This test checks that deoptimization due to debug breaks works for
+// inlined functions where the full-code is generated before the
+// debugger is attached.
+//
+// See http://code.google.com/p/chromium/issues/detail?id=105375
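+//
+// Setting the break point in g while optimized f (with g and h inlined) is
+// on the stack has to deoptimize that optimized code, even though the full
+// code for these functions was generated before the debugger attached.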
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var count = 0;
+var break_count = 0;
+
+// Debug event listener which sets a breakpoint first time it is hit
+// and otherwise counts break points hit and checks that the expected
+// state is reached.
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    break_count++;
+    if (break_count == 1) {
+      Debug.setBreakPoint(g, 3);
+
+      for (var i = 0; i < exec_state.frameCount(); i++) {
+        var frame = exec_state.frame(i);
+        // When function f is optimized (1 means YES, see runtime.cc) we
+        // expect an optimized frame for f and g.
+        if (%GetOptimizationStatus(f) == 1) {
+          if (i == 1) {
+            assertTrue(frame.isOptimizedFrame());
+            assertTrue(frame.isInlinedFrame());
+            assertEquals(4 - i, frame.inlinedFrameIndex());
+          } else if (i == 2) {
+            assertTrue(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          } else {
+            assertFalse(frame.isOptimizedFrame());
+            assertFalse(frame.isInlinedFrame());
+          }
+        }
+      }
+    }
+  }
+}
+
+function f() {
+  g();
+}
+
+function g() {
+  count++;
+  h();
+  var b = 1;  // Break point is set here.
+}
+
+function h() {
+  debugger;
+}
+
+f();f();f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+f();
+
+assertEquals(5, count);
+assertEquals(2, break_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-evaluate-locals-optimized-double.js b/test/mjsunit/debug-evaluate-locals-optimized-double.js
index 8447df5..9ed1dbb 100644
--- a/test/mjsunit/debug-evaluate-locals-optimized-double.js
+++ b/test/mjsunit/debug-evaluate-locals-optimized-double.js
@@ -50,12 +50,10 @@
           var expected_y = (i + 1) * 2 + 2 + ((i + 1) * 2 + 2) / 100;
 
           // All frames except the bottom one has normal variables a and b.
-          var a = ('a' === frame.localName(0)) ? 0 : 1;
-          var b = 1 - a;
-          assertEquals('a', frame.localName(a));
-          assertEquals('b', frame.localName(b));
-          assertEquals(expected_a, frame.localValue(a).value());
-          assertEquals(expected_b, frame.localValue(b).value());
+          assertEquals('a', frame.localName(0));
+          assertEquals('b', frame.localName(1));
+          assertEquals(expected_a, frame.localValue(0).value());
+          assertEquals(expected_b, frame.localValue(1).value());
 
           // All frames except the bottom one has arguments variables x and y.
           assertEquals('x', frame.argumentName(0));
diff --git a/test/mjsunit/debug-evaluate-locals-optimized.js b/test/mjsunit/debug-evaluate-locals-optimized.js
index c3cd5eb..683c139 100644
--- a/test/mjsunit/debug-evaluate-locals-optimized.js
+++ b/test/mjsunit/debug-evaluate-locals-optimized.js
@@ -50,12 +50,10 @@
           var expected_y = (i + 1) * 2 + 2;
 
           // All frames except the bottom one has normal variables a and b.
-          var a = ('a' === frame.localName(0)) ? 0 : 1;
-          var b = 1 - a;
-          assertEquals('a', frame.localName(a));
-          assertEquals('b', frame.localName(b));
-          assertEquals(expected_a, frame.localValue(a).value());
-          assertEquals(expected_b, frame.localValue(b).value());
+          assertEquals('a', frame.localName(0));
+          assertEquals('b', frame.localName(1));
+          assertEquals(expected_a, frame.localValue(0).value());
+          assertEquals(expected_b, frame.localValue(1).value());
 
           // All frames except the bottom one has arguments variables x and y.
           assertEquals('x', frame.argumentName(0));
@@ -121,7 +119,7 @@
       listenerComplete = true;
     }
   } catch (e) {
-    exception = e.stack;
+    exception = e;
   };
 };
 
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
index 1c23b0b..942bd2b 100644
--- a/test/mjsunit/debug-scopes.js
+++ b/test/mjsunit/debug-scopes.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
-
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug;
+var Debug = debug.Debug;
 
 var test_name;
 var listener_delegate;
@@ -439,6 +438,26 @@
 EndTest();
 
 
+// With block in function that is marked for optimization while being executed.
+BeginTest("With 7");
+
+function with_7() {
+  with({}) {
+    %OptimizeFunctionOnNextCall(with_7);
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+};
+with_7();
+EndTest();
+
+
 // Simple closure formed by returning an inner function referring to the outer
 // function's arguments.
 BeginTest("Closure 1");
@@ -950,6 +969,28 @@
 EndTest();
 
 
+// Catch block in function that is marked for optimization while being executed.
+BeginTest("Catch block 7");
+function catch_block_7() {
+  %OptimizeFunctionOnNextCall(catch_block_7);
+  try {
+    throw 'Exception';
+  } catch (e) {
+    debugger;
+  }
+};
+
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Catch,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({e:'Exception'}, 0, exec_state);
+};
+catch_block_7();
+EndTest();
+
+
 assertEquals(begin_test_count, break_count,
              'one or more tests did not enter the debugger');
 assertEquals(begin_test_count, end_test_count,
diff --git a/test/mjsunit/debug-step-3.js b/test/mjsunit/debug-step-3.js
new file mode 100644
index 0000000..9cac0f5
--- /dev/null
+++ b/test/mjsunit/debug-step-3.js
@@ -0,0 +1,94 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// This test checks that full code compiled without debug break slots
+// is recompiled with debug break slots when debugging is started.
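+//
+// f below is compiled and run once before the listener is attached, so its
+// full code initially has no break slots; attaching the listener has to
+// trigger recompilation for the stepping counts to come out right.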
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var bp;
+var done = false;
+var step_count = 0;
+var set_bp = false;
+
+// Debug event listener which steps until the global variable done is true.
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    if (!done) exec_state.prepareStep(Debug.StepAction.StepNext);
+    step_count++;
+  }
+};
+
+// Set the global variable state to prepare the stepping test.
+function prepare_step_test() {
+  done = false;
+  step_count = 0;
+}
+
+// Test function to step through.
+function f() {
+  var a = 0;
+  if (set_bp) { bp = Debug.setBreakPoint(f, 3); }
+  var i = 1;
+  var j = 2;
+  done = true;
+};
+
+prepare_step_test();
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Make f set a breakpoint with an activation on the stack.
+prepare_step_test();
+set_bp = true;
+f();
+// TODO(1782): Fix issue to bring back this assert.
+//assertEquals(4, step_count);
+Debug.clearBreakPoint(bp);
+
+// Set a breakpoint on the first var statement (line 1).
+set_bp = false;
+bp = Debug.setBreakPoint(f, 3);
+
+// Step through the function ensuring that the var statements are hit as well.
+prepare_step_test();
+f();
+assertEquals(4, step_count);
+
+// Clear the breakpoint and check that no stepping happens.
+Debug.clearBreakPoint(bp);
+prepare_step_test();
+f();
+assertEquals(0, step_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/debug-stepout-scope.js b/test/mjsunit/debug-stepout-scope.js
new file mode 100644
index 0000000..9c040da
--- /dev/null
+++ b/test/mjsunit/debug-stepout-scope.js
@@ -0,0 +1,423 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --expose-natives-as=builtins
+
+// Check that the ScopeIterator can properly recreate the scope at
+// every point when stepping through functions.
+
+var Debug = debug.Debug;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    // Access scope details.
+    var scope_count = exec_state.frame().scopeCount();
+    for (var i = 0; i < scope_count; i++) {
+      var scope = exec_state.frame().scope(i);
+      // assertTrue(scope.isScope());
+      scope.scopeType();
+      scope.scopeObject();
+    }
+
+    // Do steps until we reach the global scope again.
+    if (true) {
+      exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
+    }
+  }
+}
+
+Debug.setListener(listener);
+
+
+function test1() {
+  debugger;
+  with ({x:1}) {
+    x = 2;
+  }
+}
+test1();
+
+
+function test2() {
+  if (true) {
+    with ({}) {
+      debugger;
+    }
+  } else {
+    with ({}) {
+      return 10;
+    }
+  }
+}
+test2();
+
+
+function test3() {
+  if (true) {
+    debugger;
+  } else {
+    with ({}) {
+      return 10;
+    }
+  }
+}
+test3();
+
+
+function test4() {
+  debugger;
+  with ({x:1}) x = 1
+}
+test4();
+
+
+function test5() {
+  debugger;
+  var dummy = 1;
+  with ({}) {
+    with ({}) {
+      dummy = 2;
+    }
+  }
+  dummy = 3;
+}
+test5();
+
+
+function test6() {
+  debugger;
+  try {
+    throw 'stuff';
+  } catch (e) {
+    e = 1;
+  }
+}
+test6();
+
+
+function test7() {
+  debugger;
+  function foo() {}
+}
+test7();
+
+
+function test8() {
+  debugger;
+  (function foo() {})();
+}
+test8();
+
+
+var q = 42;
+var prefixes = [ "debugger; ",
+                 "if (false) { try { throw 0; } catch(x) { return x; } }; debugger; " ];
+var bodies = [ "1",
+               "1 ",
+               "1;",
+               "1; ",
+               "q",
+               "q ",
+               "q;",
+               "q; ",
+               "try { throw 'stuff' } catch (e) { e = 1; }",
+               "try { throw 'stuff' } catch (e) { e = 1; } ",
+               "try { throw 'stuff' } catch (e) { e = 1; };",
+               "try { throw 'stuff' } catch (e) { e = 1; }; " ];
+var with_bodies = [ "with ({}) {}",
+                    "with ({x:1}) x",
+                    "with ({x:1}) x = 1",
+                    "with ({x:1}) x ",
+                    "with ({x:1}) x = 1 ",
+                    "with ({x:1}) x;",
+                    "with ({x:1}) x = 1;",
+                    "with ({x:1}) x; ",
+                    "with ({x:1}) x = 1; " ];
+
+
+function test9() {
+  debugger;
+  for (var i = 0; i < prefixes.length; ++i) {
+    var pre = prefixes[i];
+    for (var j = 0; j < bodies.length; ++j) {
+      var body = bodies[j];
+      eval(pre + body);
+      eval("'use strict'; " + pre + body);
+    }
+    for (var j = 0; j < with_bodies.length; ++j) {
+      var body = with_bodies[j];
+      eval(pre + body);
+    }
+  }
+}
+test9();
+
+
+function test10() {
+  debugger;
+  with ({}) {
+    return 10;
+  }
+}
+test10();
+
+
+function test11() {
+  debugger;
+  try {
+    throw 'stuff';
+  } catch (e) {
+    return 10;
+  }
+}
+test11();
+
+
+// Test global eval and function constructor.
+for (var i = 0; i < prefixes.length; ++i) {
+  var pre = prefixes[i];
+  for (var j = 0; j < bodies.length; ++j) {
+    var body = bodies[j];
+    eval(pre + body);
+    eval("'use strict'; " + pre + body);
+    Function(pre + body)();
+  }
+  for (var j = 0; j < with_bodies.length; ++j) {
+    var body = with_bodies[j];
+    eval(pre + body);
+    Function(pre + body)();
+  }
+}
+
+
+try {
+  with({}) {
+    debugger;
+    eval("{}$%:^");
+  }
+} catch(e) {
+  nop();
+}
+
+// Return from function constructed with Function constructor.
+var anon = 12;
+for (var i = 0; i < prefixes.length; ++i) {
+  var pre = prefixes[i];
+  Function(pre + "return 42")();
+  Function(pre + "return 42 ")();
+  Function(pre + "return 42;")();
+  Function(pre + "return 42; ")();
+  Function(pre + "return anon")();
+  Function(pre + "return anon ")();
+  Function(pre + "return anon;")();
+  Function(pre + "return anon; ")();
+}
+
+
+function nop() {}
+
+
+function stress() {
+  debugger;
+
+  L: with ({x:12}) {
+    break L;
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'inner'}) {
+        break label;
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'inner'}) {
+        break label;
+      }
+    }
+    nop();
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          break label;
+        }
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    label: {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          break label;
+        }
+      }
+    }
+    nop();
+  }
+
+
+  with ({x: 'outer'}) {
+    for (var i = 0; i < 3; ++i) {
+      with ({x: 'inner' + i}) {
+        continue;
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    label: for (var i = 0; i < 3; ++i) {
+      with ({x: 'middle' + i}) {
+        for (var j = 0; j < 3; ++j) {
+          with ({x: 'inner' + j}) {
+            continue label;
+          }
+        }
+      }
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+      nop();
+    }
+  }
+
+
+  with ({x: 'outer'}) {
+    try {
+      with ({x: 'middle'}) {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      }
+    } catch (e) {
+    }
+  }
+
+
+  try {
+    with ({x: 'outer'}) {
+      try {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      } finally {
+      }
+    }
+  } catch (e) {
+  }
+
+
+  try {
+    with ({x: 'outer'}) {
+      try {
+        with ({x: 'inner'}) {
+          throw 0;
+        }
+      } finally {
+        nop();
+      }
+    }
+  } catch (e) {
+  }
+
+
+  function stress1() {
+    with ({x:12}) {
+      return x;
+    }
+  }
+  stress1();
+
+
+  function stress2() {
+    with ({x: 'outer'}) {
+      with ({x: 'inner'}) {
+        return x;
+      }
+    }
+  }
+  stress2();
+
+  function stress3() {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+      return e;
+    }
+  }
+  stress3();
+
+
+  function stress4() {
+    try {
+      with ({x: 'inner'}) {
+        throw 0;
+      }
+    } catch (e) {
+      with ({x: 'inner'}) {
+        return e;
+      }
+    }
+  }
+  stress4();
+
+}
+stress();
+
+
+// With block as the last(!) statement in global code.
+with ({}) { debugger; }
\ No newline at end of file
diff --git a/test/mjsunit/element-kind.js b/test/mjsunit/element-kind.js
deleted file mode 100644
index 48a029f..0000000
--- a/test/mjsunit/element-kind.js
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-// Test element kind of objects
-
-var element_kind = {
-  fast_elements                     :  1,
-  fast_double_elements              :  2,
-  dictionary_elements               :  3,
-  external_byte_elements            :  4,
-  external_unsigned_byte_elements   :  5,
-  external_short_elements           :  6,
-  external_unsigned_short_elements  :  7,
-  external_int_elements             :  8,
-  external_unsigned_int_elements    :  9,
-  external_float_elements           : 10,
-  external_double_elements          : 11,
-  external_pixel_elements           : 12
-}
-
-// We expect an object to only be of one element kind.
-function assertKind(expected, obj){
-  assertEquals(expected == element_kind.fast_elements,
-               %HasFastElements(obj));
-  assertEquals(expected == element_kind.fast_double_elements,
-               %HasFastDoubleElements(obj));
-  assertEquals(expected == element_kind.dictionary_elements,
-               %HasDictionaryElements(obj));
-  assertEquals(expected == element_kind.external_byte_elements,
-               %HasExternalByteElements(obj));
-  assertEquals(expected == element_kind.external_unsigned_byte_elements,
-               %HasExternalUnsignedByteElements(obj));
-  assertEquals(expected == element_kind.external_short_elements,
-               %HasExternalShortElements(obj));
-  assertEquals(expected == element_kind.external_unsigned_short_elements,
-               %HasExternalUnsignedShortElements(obj));
-  assertEquals(expected == element_kind.external_int_elements,
-               %HasExternalIntElements(obj));
-  assertEquals(expected == element_kind.external_unsigned_int_elements,
-               %HasExternalUnsignedIntElements(obj));
-  assertEquals(expected == element_kind.external_float_elements,
-               %HasExternalFloatElements(obj));
-  assertEquals(expected == element_kind.external_double_elements,
-               %HasExternalDoubleElements(obj));
-  assertEquals(expected == element_kind.external_pixel_elements,
-               %HasExternalPixelElements(obj));
-  // every external kind is also an external array
-  assertEquals(expected >= element_kind.external_byte_elements,
-               %HasExternalArrayElements(obj));
-}
-
-var me = {};
-assertKind(element_kind.fast_elements, me);
-me.dance = 0xD15C0;
-me.drink = 0xC0C0A;
-assertKind(element_kind.fast_elements, me);
-
-var you = new Array();
-for(i = 0; i < 1337; i++) {
-  you[i] = i;
-}
-assertKind(element_kind.fast_elements, you);
-
-assertKind(element_kind.dictionary_elements, new Array(0xC0C0A));
-
-// fast_double_elements not yet available
-
-
-assertKind(element_kind.external_byte_elements,           new Int8Array(9001));
-assertKind(element_kind.external_unsigned_byte_elements,  new Uint8Array(007));
-assertKind(element_kind.external_short_elements,          new Int16Array(666));
-assertKind(element_kind.external_unsigned_short_elements, new Uint16Array(42));
-assertKind(element_kind.external_int_elements,            new Int32Array(0xF));
-assertKind(element_kind.external_unsigned_int_elements,   new Uint32Array(23));
-assertKind(element_kind.external_float_elements,          new Float32Array(7));
-assertKind(element_kind.external_double_elements,         new Float64Array(0));
-assertKind(element_kind.external_pixel_elements,          new PixelArray(512));
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/elements-kind-depends.js
similarity index 68%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/elements-kind-depends.js
index 2502b53..82f188b 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/elements-kind-depends.js
@@ -25,22 +25,50 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax --smi-only-arrays
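+
+// burn() stores a double into a fresh array, so the result must come back
+// with double elements rather than smi-only ones even after burn has been
+// crankshafted; loop_test likewise must keep storing doubles once optimized.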
 
-var e = new Error();
-assertEquals('Error', e + '');
+function burn() {
+  var a = new Array(3);
+  a[0] = 10;
+  a[1] = 15.5;
+  a[2] = 20;
+  return a;
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+function check(a) {
+  assertEquals(10, a[0]);
+  assertEquals(15.5, a[1]);
+  assertEquals(20, a[2]);
+}
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var b;
+for (var i = 0; i < 3; ++i) {
+  b = burn();
+  check(b);  // all OK
+}
+%OptimizeFunctionOnNextCall(burn);
+b = burn();
+check(b);  // Failed without the fix for this regression.
+
+
+function loop_test(x) {
+  for (var i = 0; i < 3; i++) {
+    x[i] = (i+1) * 0.5;
+  }
+}
+
+function check2(b) {
+  assertEquals(0.5, b[0]);
+  assertEquals(1.0, b[1]);
+  assertEquals(1.5, b[2]);
+}
+
+for (var i = 0; i < 3; ++i) {
+  b = [0,1,2];
+  loop_test(b);
+  check2(b);
+}
+%OptimizeFunctionOnNextCall(loop_test);
+b = [0,1,2];
+loop_test(b);
+check2(b);
diff --git a/test/mjsunit/elements-kind.js b/test/mjsunit/elements-kind.js
new file mode 100644
index 0000000..8a8a3c7
--- /dev/null
+++ b/test/mjsunit/elements-kind.js
@@ -0,0 +1,340 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+
+// Test element kind of objects.
+// Since --smi-only-arrays affects builtins, the flag's compile-time default
+// is baked in when building with a snapshot.  If --smi-only-arrays is off by
+// default, only a no-snapshot build actually has smi-only arrays enabled in
+// this test case.  Depending on whether smi-only arrays are actually
+// enabled, this test takes the appropriate code path to check smi-only arrays.
+
+support_smi_only_arrays = %HasFastSmiOnlyElements([]);
+
+if (support_smi_only_arrays) {
+  print("Tests include smi-only arrays.");
+} else {
+  print("Tests do NOT include smi-only arrays.");
+}
+
+var elements_kind = {
+  fast_smi_only            :  'fast smi only elements',
+  fast                     :  'fast elements',
+  fast_double              :  'fast double elements',
+  dictionary               :  'dictionary elements',
+  external_byte            :  'external byte elements',
+  external_unsigned_byte   :  'external unsigned byte elements',
+  external_short           :  'external short elements',
+  external_unsigned_short  :  'external unsigned short elements',
+  external_int             :  'external int elements',
+  external_unsigned_int    :  'external unsigned int elements',
+  external_float           :  'external float elements',
+  external_double          :  'external double elements',
+  external_pixel           :  'external pixel elements'
+}
+
+function getKind(obj) {
+  if (%HasFastSmiOnlyElements(obj)) return elements_kind.fast_smi_only;
+  if (%HasFastElements(obj)) return elements_kind.fast;
+  if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
+  if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+  // Every external kind is also an external array.
+  assertTrue(%HasExternalArrayElements(obj));
+  if (%HasExternalByteElements(obj)) {
+    return elements_kind.external_byte;
+  }
+  if (%HasExternalUnsignedByteElements(obj)) {
+    return elements_kind.external_unsigned_byte;
+  }
+  if (%HasExternalShortElements(obj)) {
+    return elements_kind.external_short;
+  }
+  if (%HasExternalUnsignedShortElements(obj)) {
+    return elements_kind.external_unsigned_short;
+  }
+  if (%HasExternalIntElements(obj)) {
+    return elements_kind.external_int;
+  }
+  if (%HasExternalUnsignedIntElements(obj)) {
+    return elements_kind.external_unsigned_int;
+  }
+  if (%HasExternalFloatElements(obj)) {
+    return elements_kind.external_float;
+  }
+  if (%HasExternalDoubleElements(obj)) {
+    return elements_kind.external_double;
+  }
+  if (%HasExternalPixelElements(obj)) {
+    return elements_kind.external_pixel;
+  }
+}
+
+function assertKind(expected, obj, name_opt) {
+  if (!support_smi_only_arrays &&
+      expected == elements_kind.fast_smi_only) {
+    expected = elements_kind.fast;
+  }
+  assertEquals(expected, getKind(obj), name_opt);
+}
+
+var me = {};
+assertKind(elements_kind.fast, me);
+me.dance = 0xD15C0;
+me.drink = 0xC0C0A;
+assertKind(elements_kind.fast, me);
+
+var too = [1,2,3];
+assertKind(elements_kind.fast_smi_only, too);
+too.dance = 0xD15C0;
+too.drink = 0xC0C0A;
+assertKind(elements_kind.fast_smi_only, too);
+
+// Make sure the element kind transitions from smi-only when a non-smi is stored.
+var you = new Array();
+assertKind(elements_kind.fast_smi_only, you);
+for (var i = 0; i < 1337; i++) {
+  var val = i;
+  if (i == 1336) {
+    assertKind(elements_kind.fast_smi_only, you);
+    val = new Object();
+  }
+  you[i] = val;
+}
+assertKind(elements_kind.fast, you);
+
+assertKind(elements_kind.dictionary, new Array(0xDECAF));
+
+var fast_double_array = new Array(0xDECAF);
+for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
+assertKind(elements_kind.fast_double, fast_double_array);
+
+assertKind(elements_kind.external_byte,           new Int8Array(9001));
+assertKind(elements_kind.external_unsigned_byte,  new Uint8Array(007));
+assertKind(elements_kind.external_short,          new Int16Array(666));
+assertKind(elements_kind.external_unsigned_short, new Uint16Array(42));
+assertKind(elements_kind.external_int,            new Int32Array(0xF));
+assertKind(elements_kind.external_unsigned_int,   new Uint32Array(23));
+assertKind(elements_kind.external_float,          new Float32Array(7));
+assertKind(elements_kind.external_double,         new Float64Array(0));
+assertKind(elements_kind.external_pixel,          new PixelArray(512));
+
+// Crankshaft support for smi-only array elements.
+function monomorphic(array) {
+  for (var i = 0; i < 3; i++) {
+    array[i] = i + 10;
+  }
+  assertKind(elements_kind.fast_smi_only, array);
+  for (var i = 0; i < 3; i++) {
+    var a = array[i];
+    assertEquals(i + 10, a);
+  }
+}
+var smi_only = [1, 2, 3];
+for (var i = 0; i < 3; i++) monomorphic(smi_only);
+%OptimizeFunctionOnNextCall(monomorphic);
+monomorphic(smi_only);
+
+if (support_smi_only_arrays) {
+  function construct_smis() {
+    var a = [0, 0, 0];
+    a[0] = 0;  // Send the COW array map to the steak house.
+    assertKind(elements_kind.fast_smi_only, a);
+    return a;
+  }
+  function construct_doubles() {
+    var a = construct_smis();
+    a[0] = 1.5;
+    assertKind(elements_kind.fast_double, a);
+    return a;
+  }
+  function construct_objects() {
+    var a = construct_smis();
+    a[0] = "one";
+    assertKind(elements_kind.fast, a);
+    return a;
+  }
+
+  // Test crankshafted transition SMI->DOUBLE.
+  function convert_to_double(array) {
+    array[1] = 2.5;
+    assertKind(elements_kind.fast_double, array);
+    assertEquals(2.5, array[1]);
+  }
+  var smis = construct_smis();
+  for (var i = 0; i < 3; i++) convert_to_double(smis);
+  %OptimizeFunctionOnNextCall(convert_to_double);
+  smis = construct_smis();
+  convert_to_double(smis);
+  // Test crankshafted transitions SMI->FAST and DOUBLE->FAST.
+  function convert_to_fast(array) {
+    array[1] = "two";
+    assertKind(elements_kind.fast, array);
+    assertEquals("two", array[1]);
+  }
+  smis = construct_smis();
+  for (var i = 0; i < 3; i++) convert_to_fast(smis);
+  var doubles = construct_doubles();
+  for (var i = 0; i < 3; i++) convert_to_fast(doubles);
+  smis = construct_smis();
+  doubles = construct_doubles();
+  %OptimizeFunctionOnNextCall(convert_to_fast);
+  convert_to_fast(smis);
+  convert_to_fast(doubles);
+  // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
+  // transition to FAST directly).
+  function convert_mixed(array, value, kind) {
+    array[1] = value;
+    assertKind(kind, array);
+    assertEquals(value, array[1]);
+  }
+  smis = construct_smis();
+  for (var i = 0; i < 3; i++) {
+    convert_mixed(smis, 1.5, elements_kind.fast_double);
+  }
+  doubles = construct_doubles();
+  for (var i = 0; i < 3; i++) {
+    convert_mixed(doubles, "three", elements_kind.fast);
+  }
+  smis = construct_smis();
+  doubles = construct_doubles();
+  %OptimizeFunctionOnNextCall(convert_mixed);
+  convert_mixed(smis, 1, elements_kind.fast);
+  convert_mixed(doubles, 1, elements_kind.fast);
+  assertTrue(%HaveSameMap(smis, doubles));
+}
+
+// Crankshaft support for smi-only elements in dynamic array literals.
+function get(foo) { return foo; }  // Used to generate dynamic values.
+
+function crankshaft_test() {
+  var a = [get(1), get(2), get(3)];
+  assertKind(elements_kind.fast_smi_only, a);
+  var b = [get(1), get(2), get("three")];
+  assertKind(elements_kind.fast, b);
+  var c = [get(1), get(2), get(3.5)];
+  if (support_smi_only_arrays) {
+    assertKind(elements_kind.fast_double, c);
+  } else {
+    assertKind(elements_kind.fast, c);
+  }
+}
+for (var i = 0; i < 3; i++) {
+  crankshaft_test();
+}
+%OptimizeFunctionOnNextCall(crankshaft_test);
+crankshaft_test();
+
+// Elements_kind transitions for arrays.
+
+// A map can have three different elements_kind transitions: SMI->DOUBLE,
+// DOUBLE->OBJECT, and SMI->OBJECT. No matter in which order these three are
+// created, they must always end up with the same FAST map.
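+//
+// Transition lattice (all paths converge on the same FAST map):
+//
+//   FAST_SMI_ONLY ----> FAST_DOUBLE
+//          \                |
+//           \               v
+//            +----------> FAST (object)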
+
+// This test is meaningless without FAST_SMI_ONLY_ELEMENTS.
+if (support_smi_only_arrays) {
+  // Preparation: create one pair of identical objects for each case.
+  var a = [1, 2, 3];
+  var b = [1, 2, 3];
+  assertTrue(%HaveSameMap(a, b));
+  assertKind(elements_kind.fast_smi_only, a);
+  var c = [1, 2, 3];
+  c["case2"] = true;
+  var d = [1, 2, 3];
+  d["case2"] = true;
+  assertTrue(%HaveSameMap(c, d));
+  assertFalse(%HaveSameMap(a, c));
+  assertKind(elements_kind.fast_smi_only, c);
+  var e = [1, 2, 3];
+  e["case3"] = true;
+  var f = [1, 2, 3];
+  f["case3"] = true;
+  assertTrue(%HaveSameMap(e, f));
+  assertFalse(%HaveSameMap(a, e));
+  assertFalse(%HaveSameMap(c, e));
+  assertKind(elements_kind.fast_smi_only, e);
+  // Case 1: SMI->DOUBLE, DOUBLE->OBJECT, SMI->OBJECT.
+  a[0] = 1.5;
+  assertKind(elements_kind.fast_double, a);
+  a[0] = "foo";
+  assertKind(elements_kind.fast, a);
+  b[0] = "bar";
+  assertTrue(%HaveSameMap(a, b));
+  // Case 2: SMI->DOUBLE, SMI->OBJECT, DOUBLE->OBJECT.
+  c[0] = 1.5;
+  assertKind(elements_kind.fast_double, c);
+  assertFalse(%HaveSameMap(c, d));
+  d[0] = "foo";
+  assertKind(elements_kind.fast, d);
+  assertFalse(%HaveSameMap(c, d));
+  c[0] = "bar";
+  assertTrue(%HaveSameMap(c, d));
+  // Case 3: SMI->OBJECT, SMI->DOUBLE, DOUBLE->OBJECT.
+  e[0] = "foo";
+  assertKind(elements_kind.fast, e);
+  assertFalse(%HaveSameMap(e, f));
+  f[0] = 1.5;
+  assertKind(elements_kind.fast_double, f);
+  assertFalse(%HaveSameMap(e, f));
+  f[0] = "bar";
+  assertKind(elements_kind.fast, f);
+  assertTrue(%HaveSameMap(e, f));
+}
+
+// Test if Array.concat() works correctly with DOUBLE elements.
+if (support_smi_only_arrays) {
+  var a = [1, 2];
+  assertKind(elements_kind.fast_smi_only, a);
+  var b = [4.5, 5.5];
+  assertKind(elements_kind.fast_double, b);
+  var c = a.concat(b);
+  assertEquals([1, 2, 4.5, 5.5], c);
+  // TODO(1810): Change implementation so that we get DOUBLE elements here?
+  assertKind(elements_kind.fast, c);
+}
+
+// Test that Array.push() correctly handles SMI elements.
+if (support_smi_only_arrays) {
+  var a = [1, 2];
+  assertKind(elements_kind.fast_smi_only, a);
+  a.push(3, 4, 5);
+  assertKind(elements_kind.fast_smi_only, a);
+  assertEquals([1, 2, 3, 4, 5], a);
+}
+
+// Test that Array.splice() and Array.slice() return correct ElementsKinds.
+if (support_smi_only_arrays) {
+  var a = ["foo", "bar"];
+  assertKind(elements_kind.fast, a);
+  var b = a.splice(0, 1);
+  assertKind(elements_kind.fast, b);
+  var c = a.slice(0, 1);
+  assertKind(elements_kind.fast, c);
+}
+
+// Throw away type information in the ICs for next stress run.
+gc();
diff --git a/test/mjsunit/elements-transition.js b/test/mjsunit/elements-transition.js
new file mode 100644
index 0000000..5f6cc4f
--- /dev/null
+++ b/test/mjsunit/elements-transition.js
@@ -0,0 +1,107 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays
+
+support_smi_only_arrays = %HasFastSmiOnlyElements([]);
+
+if (support_smi_only_arrays) {
+  function test(test_double, test_object, set, length) {
+    // We apply the same operations to two identical arrays.  The first array
+    // triggers an IC miss, upon which the conversion stub is generated, but the
+    // actual conversion is done in runtime.  The second array, arriving at
+    // the previously patched IC, is then converted using the conversion stub.
+    var array_1 = new Array(length);
+    var array_2 = new Array(length);
+
+    assertTrue(%HasFastSmiOnlyElements(array_1));
+    assertTrue(%HasFastSmiOnlyElements(array_2));
+    for (var i = 0; i < length; i++) {
+      if (i == length - 5 && test_double) {
+        // Trigger conversion to fast double elements at length-5.
+        set(array_1, i, 0.5);
+        set(array_2, i, 0.5);
+        assertTrue(%HasFastDoubleElements(array_1));
+        assertTrue(%HasFastDoubleElements(array_2));
+      } else if (i == length - 3 && test_object) {
+        // Trigger conversion to fast object elements at length-3.
+        set(array_1, i, 'object');
+        set(array_2, i, 'object');
+        assertTrue(%HasFastElements(array_1));
+        assertTrue(%HasFastElements(array_2));
+      } else if (i != length - 7) {
+        // Set the element to an integer but leave a hole at length-7.
+        set(array_1, i, 2*i+1);
+        set(array_2, i, 2*i+1);
+      }
+    }
+
+    for (var i = 0; i < length; i++) {
+      if (i == length - 5 && test_double) {
+        assertEquals(0.5, array_1[i]);
+        assertEquals(0.5, array_2[i]);
+      } else if (i == length - 3 && test_object) {
+        assertEquals('object', array_1[i]);
+        assertEquals('object', array_2[i]);
+      } else if (i != length - 7) {
+        assertEquals(2*i+1, array_1[i]);
+        assertEquals(2*i+1, array_2[i]);
+      } else {
+        assertEquals(undefined, array_1[i]);
+        assertEquals(undefined, array_2[i]);
+      }
+    }
+
+    assertEquals(length, array_1.length);
+    assertEquals(length, array_2.length);
+  }
+
+  test(false, false, function(a,i,v){ a[i] = v; }, 20);
+  test(true,  false, function(a,i,v){ a[i] = v; }, 20);
+  test(false, true,  function(a,i,v){ a[i] = v; }, 20);
+  test(true,  true,  function(a,i,v){ a[i] = v; }, 20);
+
+  test(false, false, function(a,i,v){ a[i] = v; }, 10000);
+  test(true,  false, function(a,i,v){ a[i] = v; }, 10000);
+  test(false, true,  function(a,i,v){ a[i] = v; }, 10000);
+  test(true,  true,  function(a,i,v){ a[i] = v; }, 10000);
+
+  // Check COW arrays
+  function get_cow() { return [1, 2, 3]; }
+
+  function transition(x) { x[0] = 1.5; }
+
+  var ignore = get_cow();
+  transition(ignore);  // Handled by runtime.
+  var a = get_cow();
+  var b = get_cow();
+  transition(a);  // Handled by IC.
+  assertEquals(1.5, a[0]);
+  assertEquals(1, b[0]);
+} else {
+  print("Test skipped because smi only arrays are not supported.");
+}
\ No newline at end of file
diff --git a/test/mjsunit/error-tostring.js b/test/mjsunit/error-tostring.js
new file mode 100644
index 0000000..a285641
--- /dev/null
+++ b/test/mjsunit/error-tostring.js
@@ -0,0 +1,85 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Test default string representation of an Error object.
+
+var e = new Error();
+assertEquals('Error', e.toString());
+
+
+// Test printing of cyclic errors which return the empty string for
+// compatibility with Safari and Firefox.
+
+e = new Error();
+e.name = e;
+e.message = e;
+e.stack = "Does not occur in output";
+e.arguments = "Does not occur in output";
+e.type = "Does not occur in output";
+assertEquals('', e.toString());
+
+e = new Error();
+e.name = [ e ];
+e.message = [ e ];
+e.stack = "Does not occur in output";
+e.arguments = "Does not occur in output";
+e.type = "Does not occur in output";
+assertEquals('', e.toString());
+
+
+// Test the sequence in which getters and toString operations are called
+// on a given Error object.  Verify the produced string representation.
+
+function testErrorToString(nameValue, messageValue) {
+  var seq = [];
+  var e = {
+    get name() {
+      seq.push(1);
+      return (nameValue === undefined) ? nameValue : {
+        toString: function() { seq.push(2); return nameValue; }
+      };
+    },
+    get message() {
+      seq.push(3);
+      return (messageValue === undefined) ? messageValue : {
+        toString: function() { seq.push(4); return messageValue; }
+      };
+    }
+  };
+  var string = Error.prototype.toString.call(e);
+  return [string,seq];
+}
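+
+// seq codes: 1 = name getter, 2 = name.toString, 3 = message getter,
+// 4 = message.toString.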
+
+assertEquals(["Error",[1,3]], testErrorToString(undefined, undefined));
+assertEquals(["e1",[1,2,3]], testErrorToString("e1", undefined));
+assertEquals(["e1: null",[1,2,3,4]], testErrorToString("e1", null));
+assertEquals(["e1",[1,2,3,4]], testErrorToString("e1", ""));
+assertEquals(["Error: e2",[1,3,4]], testErrorToString(undefined, "e2"));
+assertEquals(["null: e2",[1,2,3,4]], testErrorToString(null, "e2"));
+assertEquals(["e2",[1,2,3,4]], testErrorToString("", "e2"));
+assertEquals(["e1: e2",[1,2,3,4]], testErrorToString("e1", "e2"));
diff --git a/test/mjsunit/eval.js b/test/mjsunit/eval.js
index b6284ba..100f216 100644
--- a/test/mjsunit/eval.js
+++ b/test/mjsunit/eval.js
@@ -39,7 +39,7 @@
 
 try {
   eval('hest 7 &*^*&^');
-  assertTrue(false, 'Did not throw on syntax error.');
+  assertUnreachable('Did not throw on syntax error.');
 } catch (e) {
   assertEquals('SyntaxError', e.name);
 }
@@ -108,6 +108,7 @@
 result =
   (function() {
     var foo = 2;
+    // Should be non-direct call.
     return x.eval('foo');
   })();
 assertEquals(0, result);
@@ -115,12 +116,33 @@
 foo = 0;
 result =
   (function() {
+    var foo = 2;
+    // Should be non-direct call.
+    return (1,eval)('foo');
+  })();
+assertEquals(0, result);
+
+foo = 0;
+result =
+  (function() {
     var eval = function(x) { return x; };
     var foo = eval(2);
+    // Should be non-direct call.
     return e('foo');
   })();
 assertEquals(0, result);
 
+foo = 0;
+result =
+  (function() {
+    var foo = 2;
+    // Should be direct call.
+    with ({ eval : e }) {
+      return eval('foo');
+    }
+  })();
+assertEquals(2, result);
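+// (The with scope supplies the real eval, and a call through a reference
+// named 'eval' is direct, so the local foo is visible and the result is 2.)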
+
 result =
   (function() {
     var eval = function(x) { return 2 * x; };
@@ -135,19 +157,17 @@
   })();
 assertEquals(this, result);
 
-result =
-  (function() {
-    var obj = { f: function(eval) { return eval("this"); } };
-    return obj.f(eval);
-  })();
-assertEquals(this, result);
+(function() {
+  var obj = { f: function(eval) { return eval("this"); } };
+  result = obj.f(eval);
+  assertEquals(obj, result);
+})();
 
-result =
-  (function() {
-    var obj = { f: function(eval) { arguments; return eval("this"); } };
-    return obj.f(eval);
-  })();
-assertEquals(this, result);
+(function() {
+  var obj = { f: function(eval) { arguments; return eval("this"); } };
+  result = obj.f(eval);
+  assertEquals(obj, result);
+})();
 
 eval = function(x) { return 2 * x; };
 result =
@@ -156,6 +176,9 @@
   })();
 assertEquals(4, result);
 
+
+
+
 // Regression test: calling a function named eval found in a context that is
 // not the global context should get the global object as receiver.
 result =
diff --git a/test/mjsunit/function-bind.js b/test/mjsunit/function-bind.js
index e9d0221..4a8f2d2 100644
--- a/test/mjsunit/function-bind.js
+++ b/test/mjsunit/function-bind.js
@@ -29,29 +29,31 @@
 
 // Simple tests.
 function foo(x, y, z) {
-  return x + y + z;
+  return [this, arguments.length, x];
 }
 
+assertEquals(3, foo.length);
+
 var f = foo.bind(foo);
-assertEquals(3, f(1, 1, 1));
+assertEquals([foo, 3, 1], f(1, 2, 3));
 assertEquals(3, f.length);
 
-f = foo.bind(foo, 2);
-assertEquals(4, f(1, 1));
+f = foo.bind(foo, 1);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo, 2, 2);
-assertEquals(5, f(1));
+f = foo.bind(foo, 1, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 2, 2, 2);
-assertEquals(6, f());
+f = foo.bind(foo, 1, 2, 3);
+assertEquals([foo, 3, 1], f());
 assertEquals(0, f.length);
 
 // Test that length works correctly even if more than the actual number
 // of arguments are given when binding.
 f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
-assertEquals(6, f());
+assertEquals([foo, 9, 1], f());
 assertEquals(0, f.length);
 
 // Use a different bound object.
@@ -78,65 +80,98 @@
 // When only giving the thisArg, any number of binds should have
 // the same effect.
 f = foo.bind(foo);
-assertEquals(3, f(1, 1, 1));
-f = foo.bind(foo).bind(foo).bind(foo).bind(foo);
-assertEquals(3, f(1, 1, 1));
+assertEquals([foo, 3, 1], f(1, 2, 3));
+
+var not_foo = {};
+f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(1, 2, 3));
 assertEquals(3, f.length);
 
 // Giving bound parameters should work at any place in the chain.
-f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo, 1).bind(not_foo).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo).bind(not_foo, 1).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo,1 ).bind(foo);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo,1 ).bind(not_foo);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1);
-assertEquals(3, f(1, 1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo, 1);
+assertEquals([foo, 3, 1], f(2, 3));
 assertEquals(2, f.length);
 
-// Several parameters can be given, and given in different bind invokations.
-f = foo.bind(foo, 1, 1).bind(foo).bind(foo).bind(foo);
-assertEquals(3, f(1));
+// Several parameters can be given, and given in different bind invocations.
+f = foo.bind(foo, 1, 2).bind(not_foo).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo, 1, 2).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(1));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo, 1, 2).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo, 1, 1).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo, 1, 2).bind(not_foo);
+assertEquals([foo, 3, 1], f(1));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1, 1);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo).bind(not_foo).bind(not_foo, 1, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 1).bind(foo, 1).bind(foo).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo, 1).bind(not_foo, 2).bind(not_foo).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 1).bind(foo).bind(foo, 1).bind(foo);
-assertEquals(3, f(1));
+f = foo.bind(foo, 1).bind(not_foo).bind(not_foo, 2).bind(not_foo);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo, 1);
-assertEquals(3, f(1));
+f = foo.bind(foo, 1).bind(not_foo).bind(not_foo).bind(not_foo, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
-f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo, 1);
-assertEquals(3, f(1));
+f = foo.bind(foo).bind(not_foo, 1).bind(not_foo).bind(not_foo, 2);
+assertEquals([foo, 3, 1], f(3));
 assertEquals(1, f.length);
 
+// The wrong number of arguments can be given to bound functions too.
+f = foo.bind(foo);
+assertEquals(3, f.length);
+assertEquals([foo, 0, undefined], f());
+assertEquals([foo, 1, 1], f(1));
+assertEquals([foo, 2, 1], f(1, 2));
+assertEquals([foo, 3, 1], f(1, 2, 3));
+assertEquals([foo, 4, 1], f(1, 2, 3, 4));
+
+f = foo.bind(foo, 1);
+assertEquals(2, f.length);
+assertEquals([foo, 1, 1], f());
+assertEquals([foo, 2, 1], f(2));
+assertEquals([foo, 3, 1], f(2, 3));
+assertEquals([foo, 4, 1], f(2, 3, 4));
+
+f = foo.bind(foo, 1, 2);
+assertEquals(1, f.length);
+assertEquals([foo, 2, 1], f());
+assertEquals([foo, 3, 1], f(3));
+assertEquals([foo, 4, 1], f(3, 4));
+
+f = foo.bind(foo, 1, 2, 3);
+assertEquals(0, f.length);
+assertEquals([foo, 3, 1], f());
+assertEquals([foo, 4, 1], f(4));
+
+f = foo.bind(foo, 1, 2, 3, 4);
+assertEquals(0, f.length);
+assertEquals([foo, 4, 1], f());
+
 // Test constructor calls.
 
 function bar(x, y, z) {
@@ -171,13 +206,91 @@
 
 
 // Test bind chains when used as a constructor.
-
 f = bar.bind(bar, 1).bind(bar, 2).bind(bar, 3);
 obj2 = new f();
 assertEquals(1, obj2.x);
 assertEquals(2, obj2.y);
 assertEquals(3, obj2.z);
 
-// Test instanceof obj2 is bar, not f.
+// Test obj2 is instanceof both bar and f.
 assertTrue(obj2 instanceof bar);
-assertFalse(obj2 instanceof f);
+assertTrue(obj2 instanceof f);
+
+// This-args are not relevant to instanceof.
+f = bar.bind(foo.prototype, 1).
+    bind(String.prototype, 2).
+    bind(Function.prototype, 3);
+var obj3 = new f();
+assertTrue(obj3 instanceof bar);
+assertTrue(obj3 instanceof f);
+assertFalse(obj3 instanceof foo);
+assertFalse(obj3 instanceof Function);
+assertFalse(obj3 instanceof String);
+
+// thisArg is converted to object.
+f = foo.bind(undefined);
+assertEquals([this, 0, undefined], f());
+
+f = foo.bind(null);
+assertEquals([this, 0, undefined], f());
+
+f = foo.bind(42);
+assertEquals([Object(42), 0, undefined], f());
+
+f = foo.bind("foo");
+assertEquals([Object("foo"), 0, undefined], f());
+
+f = foo.bind(true);
+assertEquals([Object(true), 0, undefined], f());
+
+// Strict functions don't convert thisArg.
+function soo(x, y, z) {
+  "use strict";
+  return [this, arguments.length, x];
+}
+
+var s = soo.bind(undefined);
+assertEquals([undefined, 0, undefined], s());
+
+s = soo.bind(null);
+assertEquals([null, 0, undefined], s());
+
+s = soo.bind(42);
+assertEquals([42, 0, undefined], s());
+
+s = soo.bind("foo");
+assertEquals(["foo", 0, undefined], s());
+
+s = soo.bind(true);
+assertEquals([true, 0, undefined], s());
+
+// Test that .arguments and .caller are poisoned according to the ES5 spec.
+
+// Check that property descriptors are correct (unconfigurable, unenumerable,
+// and both get and set is the ThrowTypeError function).
+var cdesc = Object.getOwnPropertyDescriptor(f, "caller");
+var adesc = Object.getOwnPropertyDescriptor(f, "arguments");
+
+assertFalse(cdesc.enumerable);
+assertFalse(cdesc.configurable);
+
+assertFalse(adesc.enumerable);
+assertFalse(adesc.configurable);
+
+assertSame(cdesc.get, cdesc.set);
+assertSame(cdesc.get, adesc.get);
+assertSame(cdesc.get, adesc.set);
+
+assertTrue(cdesc.get instanceof Function);
+assertEquals(0, cdesc.get.length);
+assertThrows(cdesc.get, TypeError);
+
+assertThrows(function() { return f.caller; }, TypeError);
+assertThrows(function() { f.caller = 42; }, TypeError);
+assertThrows(function() { return f.arguments; }, TypeError);
+assertThrows(function() { f.arguments = 42; }, TypeError);
+
+// Shouldn't throw. Accessing the functions caller must throw if
+// the caller is strict and the callee isn't. A bound function is built-in,
+// but not considered strict.
+(function foo() { return foo.caller; }).bind()();
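
// Editor's note: a rough functional model (not V8's implementation) of
// the ES5.1 section 15.3.4.5 behavior exercised above. It captures the
// fixed receiver and argument prepending; a genuine bound function also
// reports length = max(0, target.length - boundArgs.length), ignores the
// bound this when used as a constructor, and poisons .caller/.arguments.
function sketchBind(target, thisArg /*, ...boundArgs */) {
  var boundArgs = Array.prototype.slice.call(arguments, 2);
  return function() {
    var args = boundArgs.concat(Array.prototype.slice.call(arguments));
    return target.apply(thisArg, args);  // receiver fixed at bind time
  };
}
var sb = sketchBind(foo, foo, 1);
assertEquals([foo, 3, 1], sb(2, 3));  // mirrors f = foo.bind(foo, 1) above
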
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index ff6677e..c4d18d0 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -163,6 +163,9 @@
   "PromoteScheduledException": true,
   "DeleteHandleScopeExtensions": true,
 
+  // Vararg with minimum number > 0.
+  "Call": true,
+
   // Requires integer arguments to be non-negative.
   "Apply": true,
 
diff --git a/test/mjsunit/global-const-var-conflicts.js b/test/mjsunit/global-const-var-conflicts.js
index d38d0ee..2fca96f 100644
--- a/test/mjsunit/global-const-var-conflicts.js
+++ b/test/mjsunit/global-const-var-conflicts.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Check that dynamically introducing conflicting consts/vars
-// leads to exceptions.
+// is silently ignored (and does not lead to exceptions).
 
 var caught = 0;
 
@@ -46,12 +46,12 @@
 try { eval("const c"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
 assertTrue(typeof c == 'undefined');
 try { eval("const c = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertTrue(typeof c == 'undefined');
+assertEquals(1, c);
 
 eval("var d = 0");
 try { eval("const d"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
+assertEquals(undefined, d);
 try { eval("const d = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
+assertEquals(1, d);
 
-assertEquals(8, caught);
+assertEquals(0, caught);
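
// Editor's note: a minimal restatement of the revised semantics the
// assertions above encode (assuming this V8 revision): a const that
// conflicts with an existing binding no longer throws; the redeclaration
// is ignored, but its initializer (undefined when absent) is still
// assigned through the existing binding.
var g = 0;
eval("const g = 1");   // previously TypeError, now silent
assertEquals(1, g);
eval("const g");       // no initializer: assigns undefined
assertEquals(undefined, g);
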
diff --git a/test/mjsunit/harmony/block-conflicts.js b/test/mjsunit/harmony/block-conflicts.js
index 8d3de6f..ee2d979 100644
--- a/test/mjsunit/harmony/block-conflicts.js
+++ b/test/mjsunit/harmony/block-conflicts.js
@@ -25,10 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
 
 // Test for conflicting variable bindings.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
+
 function CheckException(e) {
   var string = e.toString();
   assertTrue(string.indexOf("has already been declared") >= 0 ||
@@ -80,6 +83,11 @@
                  "let x = function() {}",
                  "let x, y",
                  "let y, x",
+                 "const x = 0",
+                 "const x = undefined",
+                 "const x = function() {}",
+                 "const x = 2, y = 3",
+                 "const y = 4, x = 5",
                  ];
 var varbinds = [ "var x",
                  "var x = 0",
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/harmony/block-early-errors.js
similarity index 72%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/harmony/block-early-errors.js
index 2502b53..791f001 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/harmony/block-early-errors.js
@@ -25,22 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --harmony-scoping
 
-var e = new Error();
-assertEquals('Error', e + '');
+function CheckException(e) {
+  var string = e.toString();
+  assertInstanceof(e, SyntaxError);
+  assertTrue(string.indexOf("Illegal let") >= 0);
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+function Check(str) {
+  try {
+    eval("(function () { " + str + " })");
+    assertUnreachable();
+  } catch (e) {
+    CheckException(e);
+  }
+  try {
+    eval("(function () { { " + str + " } })");
+    assertUnreachable();
+  } catch (e) {
+    CheckException(e);
+  }
+}
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+// Check for early syntax errors when using let
+// declarations outside of extended mode.
+Check("let x;");
+Check("let x = 1;");
+Check("let x, y;");
diff --git a/test/mjsunit/harmony/block-for.js b/test/mjsunit/harmony/block-for.js
new file mode 100644
index 0000000..e84f0d2
--- /dev/null
+++ b/test/mjsunit/harmony/block-for.js
@@ -0,0 +1,146 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+function props(x) {
+  var array = [];
+  for (let p in x) array.push(p);
+  return array.sort();
+}
+
+assertEquals(0, props({}).length);
+assertEquals(1, props({x:1}).length);
+assertEquals(2, props({x:1, y:2}).length);
+
+assertArrayEquals(["x"], props({x:1}));
+assertArrayEquals(["x", "y"], props({x:1, y:2}));
+assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}));
+
+assertEquals(0, props([]).length);
+assertEquals(1, props([1]).length);
+assertEquals(2, props([1,2]).length);
+
+assertArrayEquals(["0"], props([1]));
+assertArrayEquals(["0", "1"], props([1,2]));
+assertArrayEquals(["0", "1", "2"], props([1,2,3]));
+
+var o = {};
+var a = [];
+let i = "outer_i";
+let s = "outer_s";
+for (let i = 0x0020; i < 0x01ff; i+=2) {
+  let s = 'char:' + String.fromCharCode(i);
+  a.push(s);
+  o[s] = i;
+}
+assertArrayEquals(a, props(o));
+assertEquals(i, "outer_i");
+assertEquals(s, "outer_s");
+
+var a = [];
+assertEquals(0, props(a).length);
+a[Math.pow(2,30)-1] = 0;
+assertEquals(1, props(a).length);
+a[Math.pow(2,31)-1] = 0;
+assertEquals(2, props(a).length);
+a[1] = 0;
+assertEquals(3, props(a).length);
+
+var result = '';
+for (let p in {a : [0], b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in {a : {v:1}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in { get a() {}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+
+// Check that there is exactly one variable without initializer
+// in a for-in statement with let variables.
+// TODO(ES6): properly activate extended mode
+assertThrows("function foo() { 'use strict'; for (let in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3 in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x, y in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3, y in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x, y = 4 in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3, y = 4 in {}) { } }", SyntaxError);
+
+
+// In a normal for statement the iteration variable is not
+// freshly allocated for each iteration.
+function closures1() {
+  let a = [];
+  for (let i = 0; i < 5; ++i) {
+    a.push(function () { return i; });
+  }
+  for (let j = 0; j < 5; ++j) {
+    assertEquals(5, a[j]());
+  }
+}
+closures1();
+
+
+function closures2() {
+  let a = [], b = [];
+  for (let i = 0, j = 10; i < 5; ++i, ++j) {
+    a.push(function () { return i; });
+    b.push(function () { return j; });
+  }
+  for (let k = 0; k < 5; ++k) {
+    assertEquals(5, a[k]());
+    assertEquals(15, b[k]());
+  }
+}
+closures2();
+
+
+// In a for-in statement the iteration variable is fresh
+// for each iteration.
+function closures3(x) {
+  let a = [];
+  for (let p in x) {
+    a.push(function () { return p; });
+  }
+  let k = 0;
+  for (let q in x) {
+    assertEquals(q, a[k]());
+    ++k;
+  }
+}
+closures3({a : [0], b : 1, c : {v : 1}, get d() {}, set e(x) {}});
diff --git a/test/mjsunit/harmony/block-leave.js b/test/mjsunit/harmony/block-leave.js
index 73eaf29..a7f6b69 100644
--- a/test/mjsunit/harmony/block-leave.js
+++ b/test/mjsunit/harmony/block-leave.js
@@ -25,7 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
+
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // We want to test the context chain shape.  In each of the tests cases
 // below, the outer with is to force a runtime lookup of the identifier 'x'
@@ -64,31 +67,30 @@
 } catch (e) {
   caught = true;
   assertEquals(25, e);
-  with ({y:19}) {
-    assertEquals(19, y);
+  (function () {
     try {
       // NOTE: This checks that the block scope containing xx has been
       // removed from the context chain.
-      xx;
+      eval('xx');
       assertTrue(false);  // should not reach here
     } catch (e2) {
       assertTrue(e2 instanceof ReferenceError);
     }
-  }
+  })();
 }
 assertTrue(caught);
 
 
-with ({x: 'outer'}) {
+(function(x) {
   label: {
     let x = 'inner';
     break label;
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   label: {
     let x = 'middle';
     {
@@ -96,20 +98,20 @@
       break label;
     }
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   for (var i = 0; i < 10; ++i) {
     let x = 'inner' + i;
     continue;
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   label: for (var i = 0; i < 10; ++i) {
     let x = 'middle' + i;
     for (var j = 0; j < 10; ++j) {
@@ -117,21 +119,21 @@
       continue label;
     }
   }
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   try {
     let x = 'inner';
     throw 0;
   } catch (e) {
-    assertEquals('outer', x);
+    assertEquals('outer', eval('x'));
   }
-}
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   try {
     let x = 'middle';
     {
@@ -139,27 +141,27 @@
       throw 0;
     }
   } catch (e) {
-    assertEquals('outer', x);
+    assertEquals('outer', eval('x'));
   }
-}
+})('outer');
 
 
 try {
-  with ({x: 'outer'}) {
+  (function(x) {
     try {
       let x = 'inner';
       throw 0;
     } finally {
-      assertEquals('outer', x);
+      assertEquals('outer', eval('x'));
     }
-  }
+  })('outer');
 } catch (e) {
   if (e instanceof MjsUnitAssertionError) throw e;
 }
 
 
 try {
-  with ({x: 'outer'}) {
+  (function(x) {
     try {
       let x = 'middle';
       {
@@ -167,9 +169,9 @@
         throw 0;
       }
     } finally {
-      assertEquals('outer', x);
+      assertEquals('outer', eval('x'));
     }
-  }
+  })('outer');
 } catch (e) {
   if (e instanceof MjsUnitAssertionError) throw e;
 }
@@ -179,47 +181,47 @@
 // from with.
 function f() {}
 
-with ({x: 'outer'}) {
+(function(x) {
   label: {
     let x = 'inner';
     break label;
   }
   f();  // The context could be restored from the stack after the call.
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   for (var i = 0; i < 10; ++i) {
     let x = 'inner';
     continue;
   }
   f();
-  assertEquals('outer', x);
-}
+  assertEquals('outer', eval('x'));
+})('outer');
 
 
-with ({x: 'outer'}) {
+(function(x) {
   try {
     let x = 'inner';
     throw 0;
   } catch (e) {
     f();
-    assertEquals('outer', x);
+    assertEquals('outer', eval('x'));
   }
-}
+})('outer');
 
 
 try {
-  with ({x: 'outer'}) {
+  (function(x) {
     try {
       let x = 'inner';
       throw 0;
     } finally {
       f();
-      assertEquals('outer', x);
+      assertEquals('outer', eval('x'));
     }
-  }
+  })('outer');
 } catch (e) {
   if (e instanceof MjsUnitAssertionError) throw e;
 }
diff --git a/test/mjsunit/harmony/block-let-crankshaft.js b/test/mjsunit/harmony/block-let-crankshaft.js
index c2fb96b..ba5bc0d 100644
--- a/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/test/mjsunit/harmony/block-let-crankshaft.js
@@ -25,7 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping --allow-natives-syntax
+// Flags: --harmony-scoping --allow-natives-syntax
+
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Test that temporal dead zone semantics for function and block scoped
 // let bindings are handled by the optimizing compiler.
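
// Editor's note: the shape these tests take, sketched (extended mode and
// --allow-natives-syntax assumed): a read in the temporal dead zone must
// throw a ReferenceError, and must keep throwing after Crankshaft
// recompiles the function.
function tdz_sketch() {
  try {
    x;  // read before initialization: temporal dead zone
    assertUnreachable();
  } catch (e) {
    assertTrue(e instanceof ReferenceError);
  }
  let x = 1;
  return x;
}
tdz_sketch();
%OptimizeFunctionOnNextCall(tdz_sketch);
assertEquals(1, tdz_sketch());  // optimized code must preserve the check
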
diff --git a/test/mjsunit/harmony/block-let-declaration.js b/test/mjsunit/harmony/block-let-declaration.js
index 49b6348..480e033 100644
--- a/test/mjsunit/harmony/block-let-declaration.js
+++ b/test/mjsunit/harmony/block-let-declaration.js
@@ -25,41 +25,113 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
 
 // Test let declarations in various settings.
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Global
 let x;
 let y = 2;
+const z = 4;
 
 // Block local
 {
   let y;
   let x = 3;
+  const z = 5;
 }
 
 assertEquals(undefined, x);
 assertEquals(2,y);
+assertEquals(4,z);
 
 if (true) {
   let y;
   assertEquals(undefined, y);
 }
 
+// Invalid declarations are early errors in harmony mode and thus should trigger
+// an exception in eval code during parsing, before even compiling or executing
+// the code. Thus the generated function is not called here.
 function TestLocalThrows(str, expect) {
-  assertThrows("(function(){" + str + "})()", expect);
+  assertThrows("(function(){ 'use strict'; " + str + "})", expect);
 }
 
 function TestLocalDoesNotThrow(str) {
-  assertDoesNotThrow("(function(){" + str + "})()");
+  assertDoesNotThrow("(function(){ 'use strict'; " + str + "})()");
 }
 
-// Unprotected statement
+// Test let declarations in statement positions.
 TestLocalThrows("if (true) let x;", SyntaxError);
+TestLocalThrows("if (true) {} else let x;", SyntaxError);
 TestLocalThrows("do let x; while (false)", SyntaxError);
 TestLocalThrows("while (false) let x;", SyntaxError);
+TestLocalThrows("label: let x;", SyntaxError);
+TestLocalThrows("for (;false;) let x;", SyntaxError);
+TestLocalThrows("switch (true) { case true: let x; }", SyntaxError);
+TestLocalThrows("switch (true) { default: let x; }", SyntaxError);
 
+// Test const declarations with initialisers in statement positions.
+TestLocalThrows("if (true) const x = 1;", SyntaxError);
+TestLocalThrows("if (true) {} else const x = 1;", SyntaxError);
+TestLocalThrows("do const x = 1; while (false)", SyntaxError);
+TestLocalThrows("while (false) const x = 1;", SyntaxError);
+TestLocalThrows("label: const x = 1;", SyntaxError);
+TestLocalThrows("for (;false;) const x = 1;", SyntaxError);
+TestLocalThrows("switch (true) { case true: const x = 1; }", SyntaxError);
+TestLocalThrows("switch (true) { default: const x = 1; }", SyntaxError);
+
+// Test const declarations without initialisers.
+TestLocalThrows("const x;", SyntaxError);
+TestLocalThrows("const x = 1, y;", SyntaxError);
+TestLocalThrows("const x, y = 1;", SyntaxError);
+
+// Test const declarations without initialisers in statement positions.
+TestLocalThrows("if (true) const x;", SyntaxError);
+TestLocalThrows("if (true) {} else const x;", SyntaxError);
+TestLocalThrows("do const x; while (false)", SyntaxError);
+TestLocalThrows("while (false) const x;", SyntaxError);
+TestLocalThrows("label: const x;", SyntaxError);
+TestLocalThrows("for (;false;) const x;", SyntaxError);
+TestLocalThrows("switch (true) { case true: const x; }", SyntaxError);
+TestLocalThrows("switch (true) { default: const x; }", SyntaxError);
+
+// Test var declarations in statement positions.
 TestLocalDoesNotThrow("if (true) var x;");
+TestLocalDoesNotThrow("if (true) {} else var x;");
 TestLocalDoesNotThrow("do var x; while (false)");
 TestLocalDoesNotThrow("while (false) var x;");
+TestLocalDoesNotThrow("label: var x;");
+TestLocalDoesNotThrow("for (;false;) var x;");
+TestLocalDoesNotThrow("switch (true) { case true: var x; }");
+TestLocalDoesNotThrow("switch (true) { default: var x; }");
+
+// Test function declarations in source element and
+// non-strict statement positions.
+function f() {
+  // Non-strict source element positions.
+  function g0() {
+    "use strict";
+    // Strict source element positions.
+    function h() { }
+    {
+      function h1() { }
+    }
+  }
+  {
+    function g1() { }
+  }
+}
+f();
+
+// Test function declarations in statement position in strict mode.
+TestLocalThrows("function f() { if (true) function g() {}", SyntaxError);
+TestLocalThrows("function f() { if (true) {} else function g() {}", SyntaxError);
+TestLocalThrows("function f() { do function g() {} while (false)", SyntaxError);
+TestLocalThrows("function f() { while (false) function g() {}", SyntaxError);
+TestLocalThrows("function f() { label: function g() {}", SyntaxError);
+TestLocalThrows("function f() { for (;false;) function g() {}", SyntaxError);
+TestLocalThrows("function f() { switch (true) { case true: function g() {} }", SyntaxError);
+TestLocalThrows("function f() { switch (true) { default: function g() {} }", SyntaxError);
diff --git a/test/mjsunit/harmony/block-let-semantics.js b/test/mjsunit/harmony/block-let-semantics.js
index 198c3b4..d14e7cd 100644
--- a/test/mjsunit/harmony/block-let-semantics.js
+++ b/test/mjsunit/harmony/block-let-semantics.js
@@ -25,7 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-block-scoping
+// Flags: --harmony-scoping
+
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Test temporal dead zone semantics of let bound variables in
 // function and block scopes.
@@ -61,6 +64,7 @@
 TestAll('let x = x += 1');
 TestAll('let x = x++');
 TestAll('let x = ++x');
+TestAll('const x = x + 1');
 
 // Use before initialization in prior statement.
 TestAll('x + 1; let x;');
@@ -68,25 +72,36 @@
 TestAll('x += 1; let x;');
 TestAll('++x; let x;');
 TestAll('x++; let x;');
+TestAll('let y = x; const x = 1;');
 
 TestAll('f(); let x; function f() { return x + 1; }');
 TestAll('f(); let x; function f() { x = 1; }');
 TestAll('f(); let x; function f() { x += 1; }');
 TestAll('f(); let x; function f() { ++x; }');
 TestAll('f(); let x; function f() { x++; }');
+TestAll('f(); const x = 1; function f() { return x; }');
 
 TestAll('f()(); let x; function f() { return function() { return x + 1; } }');
 TestAll('f()(); let x; function f() { return function() { x = 1; } }');
 TestAll('f()(); let x; function f() { return function() { x += 1; } }');
 TestAll('f()(); let x; function f() { return function() { ++x; } }');
 TestAll('f()(); let x; function f() { return function() { x++; } }');
+TestAll('f()(); const x = 1; function f() { return function() { return x; } }');
 
-// Use in before initialization with a dynamic lookup.
+// Use before initialization with a dynamic lookup.
 TestAll('eval("x + 1;"); let x;');
 TestAll('eval("x = 1;"); let x;');
 TestAll('eval("x += 1;"); let x;');
 TestAll('eval("++x;"); let x;');
 TestAll('eval("x++;"); let x;');
+TestAll('eval("x"); const x = 1;');
+
+// Use before initialization with check for eval-shadowed bindings.
+TestAll('function f() { eval("var y = 2;"); x + 1; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); x = 1; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); x += 1; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); ++x; }; f(); let x;');
+TestAll('function f() { eval("var y = 2;"); x++; }; f(); let x;');
 
 // Test that variables introduced by function declarations are created and
 // initialized upon entering a function / block scope.
@@ -115,7 +130,7 @@
 
 // Test that a function declaration sees the scope it resides in.
 function f2() {
-  let m, n;
+  let m, n, o, p;
   {
     m = g;
     function g() {
@@ -132,7 +147,44 @@
     function h() {
       return b + c;
     }
-    let b = 3;
+    let c = 3;
   }
   assertEquals(5, n());
+
+  {
+    o = i;
+    function i() {
+      return d;
+    }
+    let d = 4;
+  }
+  assertEquals(4, o());
+
+  try {
+    throw 5;
+  } catch(e) {
+    p = j;
+    function j() {
+      return e + f;
+    }
+    let f = 6;
+  }
+  assertEquals(11, p());
 }
+f2();
+
+// Test that resolution of let bound variables works with scopes that call eval.
+function outer() {
+  function middle() {
+    function inner() {
+      return x;
+    }
+    eval("1 + 1");
+    return x + inner();
+  }
+
+  let x = 1;
+  return middle();
+}
+
+assertEquals(2, outer());
diff --git a/test/mjsunit/harmony/block-scoping.js b/test/mjsunit/harmony/block-scoping.js
index 266e380..31194d9 100644
--- a/test/mjsunit/harmony/block-scoping.js
+++ b/test/mjsunit/harmony/block-scoping.js
@@ -25,9 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --harmony-block-scoping
+// Flags: --allow-natives-syntax --harmony-scoping
 // Test functionality of block scopes.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
+
 // Hoisting of var declarations.
 function f1() {
   {
@@ -44,12 +47,16 @@
 function f2(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     assertEquals(1, eval('one'));
     assertEquals(2, eval('x'));
     assertEquals(3, eval('y'));
     assertEquals(4, eval('z'));
+    assertEquals(5, eval('u'));
+    assertEquals(6, eval('v'));
   }
 }
 f2(1);
@@ -59,12 +66,17 @@
 function f3(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     assertEquals(1, one);
     assertEquals(2, x);
     assertEquals(3, y);
     assertEquals(4, z);
+    assertEquals(5, u);
+    assertEquals(6, v);
+
   }
 }
 f3(1);
@@ -74,13 +86,17 @@
 function f4(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     function f() {
       assertEquals(1, eval('one'));
       assertEquals(2, eval('x'));
       assertEquals(3, eval('y'));
       assertEquals(4, eval('z'));
+      assertEquals(5, eval('u'));
+      assertEquals(6, eval('v'));
     };
   }
 }
@@ -91,13 +107,17 @@
 function f5(one) {
   var x = one + 1;
   let y = one + 2;
+  const u = one + 4;
   {
     let z = one + 3;
+    const v = one + 5;
     function f() {
       assertEquals(1, one);
       assertEquals(2, x);
       assertEquals(3, y);
       assertEquals(4, z);
+      assertEquals(5, u);
+      assertEquals(6, v);
     };
   }
 }
@@ -107,8 +127,10 @@
 // Return from block.
 function f6() {
   let x = 1;
+  const u = 3;
   {
     let y = 2;
+    const v = 4;
     return x + y;
   }
 }
@@ -120,13 +142,26 @@
   let b = 1;
   var c = 1;
   var d = 1;
-  { // let variables shadowing argument, let and var variables
+  const e = 1;
+  { // let variables shadowing argument, let, const and var variables
     let a = 2;
     let b = 2;
     let c = 2;
+    let e = 2;
     assertEquals(2,a);
     assertEquals(2,b);
     assertEquals(2,c);
+    assertEquals(2,e);
+  }
+  { // const variables shadowing argument, let, const and var variables
+    const a = 2;
+    const b = 2;
+    const c = 2;
+    const e = 2;
+    assertEquals(2,a);
+    assertEquals(2,b);
+    assertEquals(2,c);
+    assertEquals(2,e);
   }
   try {
     throw 'stuff1';
@@ -156,6 +191,12 @@
   } catch (c) {
     // catch variable shadowing var variable
     assertEquals('stuff3',c);
+    {
+      // const variable shadowing catch variable
+      const c = 3;
+      assertEquals(3,c);
+    }
+    assertEquals('stuff3',c);
     try {
       throw 'stuff4';
     } catch(c) {
@@ -178,14 +219,16 @@
     c = 2;
   }
   assertEquals(1,c);
-  (function(a,b,c) {
-    // arguments shadowing argument, let and var variable
+  (function(a,b,c,e) {
+    // arguments shadowing argument, let, const and var variable
     a = 2;
     b = 2;
     c = 2;
+    e = 2;
     assertEquals(2,a);
     assertEquals(2,b);
     assertEquals(2,c);
+    assertEquals(2,e);
     // var variable shadowing var variable
     var d = 2;
   })(1,1);
@@ -193,24 +236,30 @@
   assertEquals(1,b);
   assertEquals(1,c);
   assertEquals(1,d);
+  assertEquals(1,e);
 }
 f7(1);
 
 
-// Ensure let variables are block local and var variables function local.
+// Ensure let and const variables are block local
+// and var variables function local.
 function f8() {
   var let_accessors = [];
   var var_accessors = [];
+  var const_accessors = [];
   for (var i = 0; i < 10; i++) {
     let x = i;
     var y = i;
+    const z = i;
     let_accessors[i] = function() { return x; }
     var_accessors[i] = function() { return y; }
+    const_accessors[i] = function() { return z; }
   }
   for (var j = 0; j < 10; j++) {
     y = j + 10;
     assertEquals(j, let_accessors[j]());
     assertEquals(y, var_accessors[j]());
+    assertEquals(j, const_accessors[j]());
   }
 }
 f8();
diff --git a/test/mjsunit/harmony/collections.js b/test/mjsunit/harmony/collections.js
new file mode 100644
index 0000000..4b435c1
--- /dev/null
+++ b/test/mjsunit/harmony/collections.js
@@ -0,0 +1,280 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-collections --expose-gc
+
+
+// Test valid getter and setter calls on Sets.
+function TestValidSetCalls(m) {
+  assertDoesNotThrow(function () { m.add(new Object) });
+  assertDoesNotThrow(function () { m.has(new Object) });
+  assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidSetCalls(new Set);
+
+
+// Test valid getter and setter calls on Maps and WeakMaps
+function TestValidMapCalls(m) {
+  assertDoesNotThrow(function () { m.get(new Object) });
+  assertDoesNotThrow(function () { m.set(new Object) });
+  assertDoesNotThrow(function () { m.has(new Object) });
+  assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidMapCalls(new Map);
+TestValidMapCalls(new WeakMap);
+
+
+// Test invalid getter and setter calls for WeakMap only
+function TestInvalidCalls(m) {
+  assertThrows(function () { m.get(undefined) }, TypeError);
+  assertThrows(function () { m.set(undefined, 0) }, TypeError);
+  assertThrows(function () { m.get(null) }, TypeError);
+  assertThrows(function () { m.set(null, 0) }, TypeError);
+  assertThrows(function () { m.get(0) }, TypeError);
+  assertThrows(function () { m.set(0, 0) }, TypeError);
+  assertThrows(function () { m.get('a-key') }, TypeError);
+  assertThrows(function () { m.set('a-key', 0) }, TypeError);
+}
+TestInvalidCalls(new WeakMap);
+
+
+// Test expected behavior for Sets
+function TestSet(set, key) {
+  assertFalse(set.has(key));
+  set.add(key);
+  assertTrue(set.has(key));
+  set.delete(key);
+  assertFalse(set.has(key));
+}
+function TestSetBehavior(set) {
+  for (var i = 0; i < 20; i++) {
+    TestSet(set, new Object);
+    TestSet(set, i);
+    TestSet(set, i / 100);
+    TestSet(set, 'key-' + i);
+  }
+  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < keys.length; i++) {
+    TestSet(set, keys[i]);
+  }
+}
+TestSetBehavior(new Set);
+
+
+// Test expected mapping behavior for Maps and WeakMaps
+function TestMapping(map, key, value) {
+  map.set(key, value);
+  assertSame(value, map.get(key));
+}
+function TestMapBehavior1(m) {
+  TestMapping(m, new Object, 23);
+  TestMapping(m, new Object, 'the-value');
+  TestMapping(m, new Object, new Object);
+}
+TestMapBehavior1(new Map);
+TestMapBehavior1(new WeakMap);
+
+
+// Test expected mapping behavior for Maps only
+function TestMapBehavior2(m) {
+  for (var i = 0; i < 20; i++) {
+    TestMapping(m, i, new Object);
+    TestMapping(m, i / 10, new Object);
+    TestMapping(m, 'key-' + i, new Object);
+  }
+  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < keys.length; i++) {
+    TestMapping(m, keys[i], new Object);
+  }
+}
+TestMapBehavior2(new Map);
+
+
+// Test expected querying behavior of Maps and WeakMaps
+function TestQuery(m) {
+  var key = new Object;
+  TestMapping(m, key, 'to-be-present');
+  assertTrue(m.has(key));
+  assertFalse(m.has(new Object));
+  TestMapping(m, key, undefined);
+  assertFalse(m.has(key));
+  assertFalse(m.has(new Object));
+}
+TestQuery(new Map);
+TestQuery(new WeakMap);
+
+
+// Test expected deletion behavior of Maps and WeakMaps
+function TestDelete(m) {
+  var key = new Object;
+  TestMapping(m, key, 'to-be-deleted');
+  assertTrue(m.delete(key));
+  assertFalse(m.delete(key));
+  assertFalse(m.delete(new Object));
+  assertSame(m.get(key), undefined);
+}
+TestDelete(new Map);
+TestDelete(new WeakMap);
+
+
+// Test GC of Maps and WeakMaps with entry
+function TestGC1(m) {
+  var key = new Object;
+  m.set(key, 'not-collected');
+  gc();
+  assertSame('not-collected', m.get(key));
+}
+TestGC1(new Map);
+TestGC1(new WeakMap);
+
+
+// Test GC of Maps and WeakMaps with chained entries
+function TestGC2(m) {
+  var head = new Object;
+  for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
+    m.set(key, new Object);
+  }
+  gc();
+  var count = 0;
+  for (key = head; key != undefined; key = m.get(key)) {
+    count++;
+  }
+  assertEquals(11, count);
+}
+TestGC2(new Map);
+TestGC2(new WeakMap);
+
+
+// Test property attribute [[Enumerable]]
+function TestEnumerable(func) {
+  function props(x) {
+    var array = [];
+    for (var p in x) array.push(p);
+    return array.sort();
+  }
+  assertArrayEquals([], props(func));
+  assertArrayEquals([], props(func.prototype));
+  assertArrayEquals([], props(new func()));
+}
+TestEnumerable(Set);
+TestEnumerable(Map);
+TestEnumerable(WeakMap);
+
+
+// Test arbitrary properties on Maps and WeakMaps
+function TestArbitrary(m) {
+  function TestProperty(map, property, value) {
+    map[property] = value;
+    assertEquals(value, map[property]);
+  }
+  for (var i = 0; i < 20; i++) {
+    TestProperty(m, i, 'val' + i);
+    TestProperty(m, 'foo' + i, 'bar' + i);
+  }
+  TestMapping(m, new Object, 'foobar');
+}
+TestArbitrary(new Map);
+TestArbitrary(new WeakMap);
+
+
+// Test direct constructor call
+assertTrue(Set() instanceof Set);
+assertTrue(Map() instanceof Map);
+assertTrue(WeakMap() instanceof WeakMap);
+
+
+// Test whether NaN values as keys are treated correctly.
+var s = new Set;
+assertFalse(s.has(NaN));
+assertFalse(s.has(NaN + 1));
+assertFalse(s.has(23));
+s.add(NaN);
+assertTrue(s.has(NaN));
+assertTrue(s.has(NaN + 1));
+assertFalse(s.has(23));
+var m = new Map;
+assertFalse(m.has(NaN));
+assertFalse(m.has(NaN + 1));
+assertFalse(m.has(23));
+m.set(NaN, 'a-value');
+assertTrue(m.has(NaN));
+assertTrue(m.has(NaN + 1));
+assertFalse(m.has(23));
+
+
+// Test some common JavaScript idioms for Sets
+var s = new Set;
+assertTrue(s instanceof Set);
+assertTrue(Set.prototype.add instanceof Function)
+assertTrue(Set.prototype.has instanceof Function)
+assertTrue(Set.prototype.delete instanceof Function)
+
+
+// Test some common JavaScript idioms for Maps
+var m = new Map;
+assertTrue(m instanceof Map);
+assertTrue(Map.prototype.set instanceof Function)
+assertTrue(Map.prototype.get instanceof Function)
+assertTrue(Map.prototype.has instanceof Function)
+assertTrue(Map.prototype.delete instanceof Function)
+
+
+// Test some common JavaScript idioms for WeakMaps
+var m = new WeakMap;
+assertTrue(m instanceof WeakMap);
+assertTrue(WeakMap.prototype.set instanceof Function)
+assertTrue(WeakMap.prototype.get instanceof Function)
+assertTrue(WeakMap.prototype.has instanceof Function)
+assertTrue(WeakMap.prototype.delete instanceof Function)
+
+
+// Regression test for WeakMap prototype.
+assertTrue(WeakMap.prototype.constructor === WeakMap)
+assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
+
+
+// Regression test for issue 1617: The prototype of the WeakMap constructor
+// needs to be unique (i.e. different from the one of the Object constructor).
+assertFalse(WeakMap.prototype === Object.prototype);
+var o = Object.create({});
+assertFalse("get" in o);
+assertFalse("set" in o);
+assertEquals(undefined, o.get);
+assertEquals(undefined, o.set);
+var o = Object.create({}, { myValue: {
+  value: 10,
+  enumerable: false,
+  configurable: true,
+  writable: true
+}});
+assertEquals(10, o.myValue);
+
+
+// Stress Test
+// There is a proposed stress-test available at the es-discuss mailing list
+// which cannot be reasonably automated.  Check it out by hand if you like:
+// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
\ No newline at end of file
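
// Editor's note: TestGC1/TestGC2 above can only assert that reachable
// keys survive a gc(); the defining WeakMap property -- entries dropped
// once their key is otherwise unreachable -- has no JS-observable
// witness, which is why no assertion covers it. Sketch of the observable
// half (relies on --expose-gc, already in this file's flags):
var wm = new WeakMap;
(function() {
  var key = {};
  wm.set(key, 1);
  gc();                          // key is still strongly reachable here,
  assertEquals(1, wm.get(key));  // so the entry must survive
})();
// Once the closure returns, key is unreachable and the entry may be
// collected, but no reference remains with which to observe that.
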
diff --git a/test/mjsunit/harmony/debug-blockscopes.js b/test/mjsunit/harmony/debug-blockscopes.js
index 0230e84..10aac2d 100644
--- a/test/mjsunit/harmony/debug-blockscopes.js
+++ b/test/mjsunit/harmony/debug-blockscopes.js
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --harmony-block-scoping
+// Flags: --expose-debug-as debug --harmony-scoping
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
 
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug;
+var Debug = debug.Debug;
 
 var test_name;
 var listener_delegate;
@@ -76,6 +78,7 @@
   end_test_count++;
 }
 
+var global_object = this;
 
 // Check that the scope chain contains the expected types of scopes.
 function CheckScopeChain(scopes, exec_state) {
@@ -89,7 +92,7 @@
     if (scopes[i] == debug.ScopeType.Global) {
       // Objects don't have the same class (one is "global", the other is
       // "Object"), so just check the properties directly.
-      assertPropertiesEqual(this, scope.scopeObject().value());
+      assertPropertiesEqual(global_object, scope.scopeObject().value());
     }
   }
 
@@ -329,114 +332,6 @@
 EndTest();
 
 
-// Single empty with block.
-BeginTest("With block 1");
-
-function with_block_1() {
-  with({}) {
-    debugger;
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({}, 0, exec_state);
-  CheckScopeContent({}, 1, exec_state);
-};
-with_block_1();
-EndTest();
-
-
-// Nested empty with blocks.
-BeginTest("With block 2");
-
-function with_block_2() {
-  with({}) {
-    with({}) {
-      debugger;
-    }
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({}, 0, exec_state);
-  CheckScopeContent({}, 1, exec_state);
-  CheckScopeContent({}, 2, exec_state);
-};
-with_block_2();
-EndTest();
-
-
-// With block using an in-place object literal.
-BeginTest("With block 3");
-
-function with_block_3() {
-  with({a:1,b:2}) {
-    debugger;
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({a:1,b:2}, 0, exec_state);
-};
-with_block_3();
-EndTest();
-
-
-// Nested with blocks using in-place object literals.
-BeginTest("With block 4");
-
-function with_block_4() {
-  with({a:1,b:2}) {
-    with({a:2,b:1}) {
-      debugger;
-    }
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.With,
-                   debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({a:2,b:1}, 0, exec_state);
-  CheckScopeContent({a:1,b:2}, 1, exec_state);
-};
-with_block_4();
-EndTest();
-
-
-// With block and a block local variable.
-BeginTest("With block 5");
-
-function with_block_5() {
-  with({a:1}) {
-    let a = 2;
-    debugger;
-  }
-}
-
-listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.Block,
-                   debug.ScopeType.With,
-                   debug.ScopeType.Local,
-                   debug.ScopeType.Global], exec_state);
-  CheckScopeContent({a:2}, 0, exec_state);
-  CheckScopeContent({a:1}, 1, exec_state);
-};
-with_block_5();
-EndTest();
-
-
 // Simple closure formed by returning an inner function referring to an outer
 // block local variable and an outer function's parameter.
 BeginTest("Closure 1");
@@ -464,3 +359,112 @@
 };
 closure_1(1)();
 EndTest();
+
+
+// Simple for-in loop over the keys of an object.
+BeginTest("For loop 1");
+
+function for_loop_1() {
+  for (let x in {y:undefined}) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:'y'}, 0, exec_state);
+  // The function scope contains a temporary iteration variable.
+  CheckScopeContent({x:'y'}, 1, exec_state);
+};
+for_loop_1();
+EndTest();
+
+
+// For-in loop over the keys of an object with a block scoped let variable
+// shadowing the iteration variable.
+BeginTest("For loop 2");
+
+function for_loop_2() {
+  for (let x in {y:undefined}) {
+    let x = 3;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:3}, 0, exec_state);
+  CheckScopeContent({x:'y'}, 1, exec_state);
+  // The function scope contains a temporary iteration variable.
+  CheckScopeContent({x:'y'}, 2, exec_state);
+};
+for_loop_2();
+EndTest();
+
+
+// Simple for loop.
+BeginTest("For loop 3");
+
+function for_loop_3() {
+  for (let x = 3; x < 4; ++x) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:3}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+};
+for_loop_3();
+EndTest();
+
+
+// For loop with a block scoped let variable shadowing the iteration variable.
+BeginTest("For loop 4");
+
+function for_loop_4() {
+  for (let x = 3; x < 4; ++x) {
+    let x = 5;
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:5}, 0, exec_state);
+  CheckScopeContent({x:3}, 1, exec_state);
+  CheckScopeContent({}, 2, exec_state);
+};
+for_loop_4();
+EndTest();
+
+
+// For loop with two variable declarations.
+BeginTest("For loop 5");
+
+function for_loop_5() {
+  for (let x = 3, y = 5; x < 4; ++x) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({x:3,y:5}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+};
+for_loop_5();
+EndTest();
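
// Editor's note: the "temporary iteration variable" remarks in the new
// for-loop tests reflect how this revision desugars let-bound loop
// heads: a function-scoped temporary walks the keys, and each iteration
// gets a fresh block-scoped binding copied from it, so the same name is
// visible in both the Block and the Local scope. Rough shape of the
// desugaring (a sketch, not actual compiler output):
//
//   for (var tmp in obj) {  // tmp lives in the function (Local) scope
//     let x = tmp;          // fresh per-iteration Block binding
//     ...body...
//   }
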
diff --git a/test/mjsunit/harmony/debug-evaluate-blockscopes.js b/test/mjsunit/harmony/debug-evaluate-blockscopes.js
index 549960a..d6ce8b2 100644
--- a/test/mjsunit/harmony/debug-evaluate-blockscopes.js
+++ b/test/mjsunit/harmony/debug-evaluate-blockscopes.js
@@ -25,11 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --harmony-block-scoping
+// Flags: --expose-debug-as debug --harmony-scoping
 
 // Test debug evaluation for functions without local context, but with
 // nested catch contexts.
 
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+var x;
+var result;
+
 function f() {
   {                   // Line 1.
     let i = 1;        // Line 2.
@@ -42,7 +48,7 @@
 };
 
 // Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
+var Debug = debug.Debug
 // Set breakpoint on line 6.
 var bp = Debug.setBreakPoint(f, 6);
 
diff --git a/test/mjsunit/harmony/proxies-example-membrane.js b/test/mjsunit/harmony/proxies-example-membrane.js
new file mode 100644
index 0000000..c6e7f9f
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-example-membrane.js
@@ -0,0 +1,512 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+
+
+// A simple no-op handler. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#examplea_no-op_forwarding_proxy
+
+function createHandler(obj) {
+  return {
+    getOwnPropertyDescriptor: function(name) {
+      var desc = Object.getOwnPropertyDescriptor(obj, name);
+      if (desc !== undefined) desc.configurable = true;
+      return desc;
+    },
+    getPropertyDescriptor: function(name) {
+      var desc = Object.getOwnPropertyDescriptor(obj, name);
+      //var desc = Object.getPropertyDescriptor(obj, name);  // not in ES5
+      if (desc !== undefined) desc.configurable = true;
+      return desc;
+    },
+    getOwnPropertyNames: function() {
+      return Object.getOwnPropertyNames(obj);
+    },
+    getPropertyNames: function() {
+      return Object.getOwnPropertyNames(obj);
+      //return Object.getPropertyNames(obj);  // not in ES5
+    },
+    defineProperty: function(name, desc) {
+      Object.defineProperty(obj, name, desc);
+    },
+    delete: function(name) {
+      return delete obj[name];
+    },
+    fix: function() {
+      if (Object.isFrozen(obj)) {
+        var result = {};
+        Object.getOwnPropertyNames(obj).forEach(function(name) {
+          result[name] = Object.getOwnPropertyDescriptor(obj, name);
+        });
+        return result;
+      }
+      // As long as obj is not frozen, the proxy won't allow itself to be fixed
+      return undefined; // will cause a TypeError to be thrown
+    },
+    has: function(name) { return name in obj; },
+    hasOwn: function(name) { return ({}).hasOwnProperty.call(obj, name); },
+    get: function(receiver, name) { return obj[name]; },
+    set: function(receiver, name, val) {
+      obj[name] = val;  // bad behavior when set fails in non-strict mode
+      return true;
+    },
+    enumerate: function() {
+      var result = [];
+      for (var name in obj) { result.push(name); };
+      return result;
+    },
+    keys: function() { return Object.keys(obj); }
+  };
+}
+
+
+
+// Auxiliary definitions enabling tracking of object identity in output.
+
+var objectMap = new WeakMap;
+var objectCounter = 0;
+
+function registerObject(x, s) {
+  if (x === Object(x) && !objectMap.has(x))
+    objectMap.set(x, ++objectCounter + (s == undefined ? "" : ":" + s));
+}
+
+registerObject(this, "global");
+registerObject(Object.prototype, "Object.prototype");
+
+function str(x) {
+  if (x === Object(x)) return "[" + typeof x + " " + objectMap.get(x) + "]";
+  if (typeof x == "string") return "\"" + x + "\"";
+  return "" + x;
+}
+
+
+
+// A simple membrane. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#a_simple_membrane
+
+function createSimpleMembrane(target) {
+  var enabled = true;
+
+  function wrap(obj) {
+    registerObject(obj);
+    print("wrap enter", str(obj));
+    try {
+      var x = wrap2(obj);
+      registerObject(x, "wrapped");
+      print("wrap exit", str(obj), "as", str(x));
+      return x;
+    } catch(e) {
+      print("wrap exception", str(e));
+      throw e;
+    }
+  }
+
+  function wrap2(obj) {
+    if (obj !== Object(obj)) {
+      return obj;
+    }
+
+    function wrapCall(fun, that, args) {
+      registerObject(that);
+      print("wrapCall enter", fun, str(that));
+      try {
+        var x = wrapCall2(fun, that, args);
+        print("wrapCall exit", fun, str(that), "returning", str(x));
+        return x;
+      } catch(e) {
+        print("wrapCall exception", fun, str(that), str(e));
+        throw e;
+      }
+    }
+
+    function wrapCall2(fun, that, args) {
+      if (!enabled) { throw new Error("disabled"); }
+      try {
+        return wrap(fun.apply(that, Array.prototype.map.call(args, wrap)));
+      } catch (e) {
+        throw wrap(e);
+      }
+    }
+
+    var baseHandler = createHandler(obj);
+    var handler = Proxy.create(Object.freeze({
+      get: function(receiver, name) {
+        return function() {
+          var arg = (name === "get" || name == "set") ? arguments[1] : "";
+          print("handler enter", name, arg);
+          var x = wrapCall(baseHandler[name], baseHandler, arguments);
+          print("handler exit", name, arg, "returning", str(x));
+          return x;
+        }
+      }
+    }));
+    registerObject(baseHandler, "basehandler");
+    registerObject(handler, "handler");
+
+    if (typeof obj === "function") {
+      function callTrap() {
+        print("call trap enter", str(obj), str(this));
+        var x = wrapCall(obj, wrap(this), arguments);
+        print("call trap exit", str(obj), str(this), "returning", str(x));
+        return x;
+      }
+      function constructTrap() {
+        if (!enabled) { throw new Error("disabled"); }
+        try {
+          function forward(args) { return obj.apply(this, args) }
+          return wrap(new forward(Array.prototype.map.call(arguments, wrap)));
+        } catch (e) {
+          throw wrap(e);
+        }
+      }
+      return Proxy.createFunction(handler, callTrap, constructTrap);
+    } else {
+      var prototype = wrap(Object.getPrototypeOf(obj));
+      return Proxy.create(handler, prototype);
+    }
+  }
+
+  var gate = Object.freeze({
+    enable: function() { enabled = true; },
+    disable: function() { enabled = false; }
+  });
+
+  return Object.freeze({
+    wrapper: wrap(target),
+    gate: gate
+  });
+}
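+
+// Gate sketch (illustration with a throwaway membrane; the detailed
+// assertions follow below): every trap funnels through wrapCall2, so
+// disabling the gate revokes all wrappers at once, and re-enabling it
+// restores them.
+var gateSketch = createSimpleMembrane({v: 1});
+assertEquals(1, gateSketch.wrapper.v);
+gateSketch.gate.disable();
+assertThrows(function() { gateSketch.wrapper.v }, Error);
+gateSketch.gate.enable();
+assertEquals(1, gateSketch.wrapper.v);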
+
+
+var o = {
+  a: 6,
+  b: {bb: 8},
+  f: function(x) { return x },
+  g: function(x) { return x.a },
+  h: function(x) { this.q = x }
+};
+o[2] = {c: 7};
+var m = createSimpleMembrane(o);
+var w = m.wrapper;
+print("o =", str(o))
+print("w =", str(w));
+
+var f = w.f;
+var x = f(66);
+var x = f({a: 1});
+var x = w.f({a: 1});
+var a = x.a;
+assertEquals(6, w.a);
+assertEquals(8, w.b.bb);
+assertEquals(7, w[2]["c"]);
+assertEquals(undefined, w.c);
+assertEquals(1, w.f(1));
+assertEquals(1, w.f({a: 1}).a);
+assertEquals(2, w.g({a: 2}));
+assertEquals(3, (w.r = {a: 3}).a);
+assertEquals(3, w.r.a);
+assertEquals(3, o.r.a);
+w.h(3);
+assertEquals(3, w.q);
+assertEquals(3, o.q);
+assertEquals(4, (new w.h(4)).q);
+
+var wb = w.b;
+var wr = w.r;
+var wf = w.f;
+var wf3 = w.f(3);
+var wfx = w.f({a: 6});
+var wgx = w.g({a: {aa: 7}});
+var wh4 = new w.h(4);
+m.gate.disable();
+assertEquals(3, wf3);
+assertThrows(function() { w.a }, Error);
+assertThrows(function() { w.r }, Error);
+assertThrows(function() { w.r = {a: 4} }, Error);
+assertThrows(function() { o.r.a }, Error);
+assertEquals("object", typeof o.r);
+assertEquals(5, (o.r = {a: 5}).a);
+assertEquals(5, o.r.a);
+assertThrows(function() { w[1] }, Error);
+assertThrows(function() { w.c }, Error);
+assertThrows(function() { wb.bb }, Error);
+assertThrows(function() { wr.a }, Error);
+assertThrows(function() { wf(4) }, Error);
+assertThrows(function() { wfx.a }, Error);
+assertThrows(function() { wgx.aa }, Error);
+assertThrows(function() { wh4.q }, Error);
+
+m.gate.enable();
+assertEquals(6, w.a);
+assertEquals(5, w.r.a);
+assertEquals(5, o.r.a);
+assertEquals(7, w.r = 7);
+assertEquals(7, w.r);
+assertEquals(7, o.r);
+assertEquals(8, w.b.bb);
+assertEquals(7, w[2]["c"]);
+assertEquals(undefined, w.c);
+assertEquals(8, wb.bb);
+assertEquals(3, wr.a);
+assertEquals(4, wf(4));
+assertEquals(3, wf3);
+assertEquals(6, wfx.a);
+assertEquals(7, wgx.aa);
+assertEquals(4, wh4.q);
+
+
+// An identity-preserving membrane. Adapted from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:proxies#an_identity-preserving_membrane
+
+function createMembrane(wetTarget) {
+  var wet2dry = new WeakMap();
+  var dry2wet = new WeakMap();
+
+  function asDry(obj) {
+    registerObject(obj)
+    print("asDry enter", str(obj))
+    try {
+      var x = asDry2(obj);
+      registerObject(x, "dry");
+      print("asDry exit", str(obj), "as", str(x));
+      return x;
+    } catch(e) {
+      print("asDry exception", str(e));
+      throw e;
+    }
+  }
+  function asDry2(wet) {
+    if (wet !== Object(wet)) {
+      // Primitives provide only irrevocable knowledge, so don't
+      // bother wrapping them.
+      return wet;
+    }
+    var dryResult = wet2dry.get(wet);
+    if (dryResult) { return dryResult; }
+
+    var wetHandler = createHandler(wet);
+    var dryRevokeHandler = Proxy.create(Object.freeze({
+      get: function(receiver, name) {
+        return function() {
+          var arg = (name === "get" || name == "set") ? arguments[1] : "";
+          print("dry handler enter", name, arg);
+          var optWetHandler = dry2wet.get(dryRevokeHandler);
+          try {
+            var x = asDry(optWetHandler[name].apply(
+              optWetHandler, Array.prototype.map.call(arguments, asWet)));
+            print("dry handler exit", name, arg, "returning", str(x));
+            return x;
+          } catch (eWet) {
+            var x = asDry(eWet);
+            print("dry handler exception", name, arg, "throwing", str(x));
+            throw x;
+          }
+        };
+      }
+    }));
+    dry2wet.set(dryRevokeHandler, wetHandler);
+
+    if (typeof wet === "function") {
+      function callTrap() {
+        print("dry call trap enter", str(this));
+        var x = asDry(wet.apply(
+          asWet(this), Array.prototype.map.call(arguments, asWet)));
+        print("dry call trap exit", str(this), "returning", str(x));
+        return x;
+      }
+      function constructTrap() {
+        function forward(args) { return wet.apply(this, args) }
+        return asDry(new forward(Array.prototype.map.call(arguments, asWet)));
+      }
+      dryResult =
+        Proxy.createFunction(dryRevokeHandler, callTrap, constructTrap);
+    } else {
+      dryResult =
+        Proxy.create(dryRevokeHandler, asDry(Object.getPrototypeOf(wet)));
+    }
+    wet2dry.set(wet, dryResult);
+    dry2wet.set(dryResult, wet);
+    return dryResult;
+  }
+
+  function asWet(obj) {
+    registerObject(obj)
+    print("asWet enter", str(obj))
+    try {
+      var x = asWet2(obj)
+      registerObject(x, "wet")
+      print("asWet exit", str(obj), "as", str(x))
+      return x
+    } catch(e) {
+      print("asWet exception", str(e))
+      throw e
+    }
+  }
+  function asWet2(dry) {
+    if (dry !== Object(dry)) {
+      // Primitives provide only irrevocable knowledge, so don't
+      // bother wrapping them.
+      return dry;
+    }
+    var wetResult = dry2wet.get(dry);
+    if (wetResult) { return wetResult; }
+
+    var dryHandler = createHandler(dry);
+    var wetRevokeHandler = Proxy.create(Object.freeze({
+      get: function(receiver, name) {
+        return function() {
+          var arg = (name === "get" || name == "set") ? arguments[1] : "";
+          print("wet handler enter", name, arg);
+          var optDryHandler = wet2dry.get(wetRevokeHandler);
+          try {
+            var x = asWet(optDryHandler[name].apply(
+              optDryHandler, Array.prototype.map.call(arguments, asDry)));
+            print("wet handler exit", name, arg, "returning", str(x));
+            return x;
+          } catch (eDry) {
+            var x = asWet(eDry);
+            print("wet handler exception", name, arg, "throwing", str(x));
+            throw x;
+          }
+        };
+      }
+    }));
+    wet2dry.set(wetRevokeHandler, dryHandler);
+
+    if (typeof dry === "function") {
+      function callTrap() {
+        print("wet call trap enter", str(this));
+        var x = asWet(dry.apply(
+          asDry(this), Array.prototype.map.call(arguments, asDry)));
+        print("wet call trap exit", str(this), "returning", str(x));
+        return x;
+      }
+      function constructTrap() {
+        function forward(args) { return dry.apply(this, args) }
+        return asWet(new forward(Array.prototype.map.call(arguments, asDry)));
+      }
+      wetResult =
+        Proxy.createFunction(wetRevokeHandler, callTrap, constructTrap);
+    } else {
+      wetResult =
+        Proxy.create(wetRevokeHandler, asWet(Object.getPrototypeOf(dry)));
+    }
+    dry2wet.set(dry, wetResult);
+    wet2dry.set(wetResult, dry);
+    return wetResult;
+  }
+
+  var gate = Object.freeze({
+    revoke: function() {
+      dry2wet = wet2dry = Object.freeze({
+        get: function(key) { throw new Error("revoked"); },
+        set: function(key, val) { throw new Error("revoked"); }
+      });
+    }
+  });
+
+  return Object.freeze({ wrapper: asDry(wetTarget), gate: gate });
+}
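+
+// Identity sketch (illustration with a throwaway membrane): asDry/asWet
+// memoize through wet2dry/dry2wet, so the same wet object always maps to
+// the same dry proxy.
+var idMembrane = createMembrane({p: {}})
+assertSame(idMembrane.wrapper.p, idMembrane.wrapper.p)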
+
+
+var receiver
+var argument
+var o = {
+  a: 6,
+  b: {bb: 8},
+  f: function(x) { receiver = this; argument = x; return x },
+  g: function(x) { receiver = this; argument = x; return x.a },
+  h: function(x) { receiver = this; argument = x; this.q = x },
+  s: function(x) { receiver = this; argument = x; this.x = {y: x}; return this }
+}
+o[2] = {c: 7}
+var m = createMembrane(o)
+var w = m.wrapper
+print("o =", str(o))
+print("w =", str(w))
+
+var f = w.f
+var x = f(66)
+var x = f({a: 1})
+var x = w.f({a: 1})
+var a = x.a
+assertEquals(6, w.a)
+assertEquals(8, w.b.bb)
+assertEquals(7, w[2]["c"])
+assertEquals(undefined, w.c)
+assertEquals(1, w.f(1))
+assertSame(o, receiver)
+assertEquals(1, w.f({a: 1}).a)
+assertSame(o, receiver)
+assertEquals(2, w.g({a: 2}))
+assertSame(o, receiver)
+assertSame(w, w.f(w))
+assertSame(o, receiver)
+assertSame(o, argument)
+assertSame(o, w.f(o))
+assertSame(o, receiver)
+// Note that argument !== o: o isn't a dry proxy, so it gets wrapped wet again.
+assertEquals(3, (w.r = {a: 3}).a)
+assertEquals(3, w.r.a)
+assertEquals(3, o.r.a)
+w.h(3)
+assertEquals(3, w.q)
+assertEquals(3, o.q)
+assertEquals(4, (new w.h(4)).q)
+assertEquals(5, w.s(5).x.y)
+assertSame(o, receiver)
+
+var wb = w.b
+var wr = w.r
+var wf = w.f
+var wf3 = w.f(3)
+var wfx = w.f({a: 6})
+var wgx = w.g({a: {aa: 7}})
+var wh4 = new w.h(4)
+var ws5 = w.s(5)
+var ws5x = ws5.x
+m.gate.revoke()
+assertEquals(3, wf3)
+assertThrows(function() { w.a }, Error)
+assertThrows(function() { w.r }, Error)
+assertThrows(function() { w.r = {a: 4} }, Error)
+assertThrows(function() { o.r.a }, Error)
+assertEquals("object", typeof o.r)
+assertEquals(5, (o.r = {a: 5}).a)
+assertEquals(5, o.r.a)
+assertThrows(function() { w[1] }, Error)
+assertThrows(function() { w.c }, Error)
+assertThrows(function() { wb.bb }, Error)
+assertEquals(3, wr.a)
+assertThrows(function() { wf(4) }, Error)
+assertEquals(6, wfx.a)
+assertEquals(7, wgx.aa)
+assertThrows(function() { wh4.q }, Error)
+assertThrows(function() { ws5.x }, Error)
+assertThrows(function() { ws5x.y }, Error)
diff --git a/test/mjsunit/harmony/proxies-for.js b/test/mjsunit/harmony/proxies-for.js
new file mode 100644
index 0000000..3d419c6
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-for.js
@@ -0,0 +1,168 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies
+
+
+// Helper.
+
+function TestWithProxies(test, x, y, z) {
+  test(Proxy.create, x, y, z)
+  test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+}
+
+
+// Iterate over a proxy.
+
+function TestForIn(properties, handler) {
+  TestWithProxies(TestForIn2, properties, handler)
+}
+
+function TestForIn2(create, properties, handler) {
+  var p = create(handler)
+  var found = []
+  for (var x in p) found.push(x)
+  assertArrayEquals(properties, found)
+}
+
+TestForIn(["0", "a"], {
+  enumerate: function() { return [0, "a"] }
+})
+
+TestForIn(["null", "a"], {
+  enumerate: function() { return this.enumerate2() },
+  enumerate2: function() { return [null, "a"] }
+})
+
+TestForIn(["b", "d"], {
+  getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
+  getPropertyDescriptor: function(k) {
+    switch (k) {
+      case "a": return {enumerable: false, value: "3"};
+      case "b": return {enumerable: true, get get() {}};
+      case "c": return {value: 4};
+      case "d": return {get enumerable() { return true }};
+      default: return undefined;
+    }
+  }
+})
+
+TestForIn(["b", "a", "0", "c"], Proxy.create({
+  get: function(pr, pk) {
+    return function() { return ["b", "a", 0, "c"] }
+  }
+}))
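+
+// Minimal sketch of the derived case exercised above (illustration only):
+// without an "enumerate" trap, for-in falls back to getPropertyNames +
+// getPropertyDescriptor and keeps only enumerable properties.
+var forInSketch = Proxy.create({
+  getPropertyNames: function() { return ["u", "v"] },
+  getPropertyDescriptor: function(k) {
+    return {enumerable: k == "v", value: 0}
+  }
+})
+var forInFound = []
+for (var forInKey in forInSketch) forInFound.push(forInKey)
+assertArrayEquals(["v"], forInFound)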
+
+
+
+// Iterate over an object with a proxy prototype.
+
+function TestForInDerived(properties, handler) {
+  TestWithProxies(TestForInDerived2, properties, handler)
+}
+
+function TestForInDerived2(create, properties, handler) {
+  var p = create(handler)
+  var o = Object.create(p)
+  o.z = 0
+  var found = []
+  for (var x in o) found.push(x)
+  assertArrayEquals(["z"].concat(properties), found)
+
+  var oo = Object.create(o)
+  oo.y = 0
+  var found = []
+  for (var x in oo) found.push(x)
+  assertArrayEquals(["y", "z"].concat(properties), found)
+}
+
+TestForInDerived(["0", "a"], {
+  enumerate: function() { return [0, "a"] },
+  getPropertyDescriptor: function(k) {
+    return k == "0" || k == "a" ? {} : undefined
+  }
+})
+
+TestForInDerived(["null", "a"], {
+  enumerate: function() { return this.enumerate2() },
+  enumerate2: function() { return [null, "a"] },
+  getPropertyDescriptor: function(k) {
+    return k == "null" || k == "a" ? {} : undefined
+  }
+})
+
+TestForInDerived(["b", "d"], {
+  getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
+  getPropertyDescriptor: function(k) {
+    switch (k) {
+      case "a": return {enumerable: false, value: "3"};
+      case "b": return {enumerable: true, get get() {}};
+      case "c": return {value: 4};
+      case "d": return {get enumerable() { return true }};
+      default: return undefined;
+    }
+  }
+})
+
+
+
+// Throw exception in enumerate trap.
+
+function TestForInThrow(handler) {
+  TestWithProxies(TestForInThrow2, handler)
+}
+
+function TestForInThrow2(create, handler) {
+  var p = create(handler)
+  var o = Object.create(p)
+  assertThrows(function(){ for (var x in p) {} }, "myexn")
+  assertThrows(function(){ for (var x in o) {} }, "myexn")
+}
+
+TestForInThrow({
+  enumerate: function() { throw "myexn" }
+})
+
+TestForInThrow({
+  enumerate: function() { return this.enumerate2() },
+  enumerate2: function() { throw "myexn" }
+})
+
+TestForInThrow({
+  getPropertyNames: function() { throw "myexn" }
+})
+
+TestForInThrow({
+  getPropertyNames: function() { return ["a"] },
+  getPropertyDescriptor: function() { throw "myexn" }
+})
+
+TestForInThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function() { throw "myexn" }
+  }
+}))
diff --git a/test/mjsunit/harmony/proxies-function.js b/test/mjsunit/harmony/proxies-function.js
new file mode 100644
index 0000000..3f5ace6
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-function.js
@@ -0,0 +1,732 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies --allow-natives-syntax
+
+
+// Helper.
+
+function CreateFrozen(handler, callTrap, constructTrap) {
+  if (handler.fix === undefined) handler.fix = function() { return {} }
+  var f = Proxy.createFunction(handler, callTrap, constructTrap)
+  Object.freeze(f)
+  return f
+}
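+
+// Usage sketch for the helper above (illustration only): freezing a function
+// proxy triggers its "fix" trap, and the trivial fix installed by
+// CreateFrozen lets Object.freeze succeed while the call trap keeps working.
+var frozenSketch = CreateFrozen({}, function() { return 7 })
+assertEquals(7, frozenSketch())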
+
+
+// Ensures that checking the "length" property of a function proxy doesn't
+// crash due to lack of a [[Get]] method.
+var handler = {
+  get : function(r, n) { return n == "length" ? 2 : undefined }
+}
+
+
+// Calling (call, Function.prototype.call, Function.prototype.apply,
+//          Function.prototype.bind).
+
+var global_object = this
+var receiver
+
+function TestCall(isStrict, callTrap) {
+  assertEquals(42, callTrap(5, 37))
+  // TODO(rossberg): strict mode seems to be broken on x64...
+  // assertSame(isStrict ? undefined : global_object, receiver)
+
+  var handler = {
+    get: function(r, k) {
+      return k == "length" ? 2 : Function.prototype[k]
+    }
+  }
+  var f = Proxy.createFunction(handler, callTrap)
+  var o = {f: f}
+  global_object.f = f
+
+  receiver = 333
+  assertEquals(42, f(11, 31))
+  // TODO(rossberg): strict mode seems to be broken on x64...
+  // assertSame(isStrict ? undefined : global_object, receiver)
+  receiver = 333
+  assertEquals(42, o.f(10, 32))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, o["f"](9, 33))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o).f(8, 34))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o)["f"](7, 35))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, f.call(o, 32, 10))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, f.call(undefined, 33, 9))
+  assertSame(isStrict ? undefined : global_object, receiver)
+  receiver = 333
+  assertEquals(42, f.call(null, 33, 9))
+  assertSame(isStrict ? null : global_object, receiver)
+  receiver = 333
+  assertEquals(44, f.call(2, 21, 23))
+  assertSame(2, receiver.valueOf())
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
+  assertSame(isStrict ? null : global_object, receiver)
+  assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
+  assertEquals(2, receiver.valueOf())
+  receiver = 333
+  assertEquals(32, f.apply(o, [16, 16]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call(o, 11, 31, f))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call(null, 11, 31, f))
+  assertSame(isStrict ? null : global_object, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(f, o, [11, 31], 0, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(f, null, [11, 31], 0, 2))
+  assertSame(isStrict ? null : global_object, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction(o, 11, 31, f))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction(null, 11, 31, f))
+  assertSame(isStrict ? null : global_object, receiver)
+
+  var ff = Function.prototype.bind.call(f, o, 12)
+  assertTrue(ff.length <= 1)  // TODO(rossberg): Not spec'ed yet, be lax.
+  receiver = 333
+  assertEquals(42, ff(30))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(33, Function.prototype.call.call(ff, {}, 21))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(23, %Call({}, 11, ff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(23, %Call({}, 11, 3, ff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(24, %Apply(ff, {}, [12, 13], 0, 1))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(24, %Apply(ff, {}, [12, 13], 0, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(34, %_CallFunction({}, 22, ff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(34, %_CallFunction({}, 22, 3, ff))
+  assertSame(o, receiver)
+
+  var fff = Function.prototype.bind.call(ff, o, 30)
+  assertEquals(0, fff.length)
+  receiver = 333
+  assertEquals(42, fff())
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(fff, {}))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.apply.call(fff, {}))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call({}, fff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Call({}, 11, 3, fff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(fff, {}, [], 0, 0))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(fff, {}, [12, 13], 0, 0))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %Apply(fff, {}, [12, 13], 0, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction({}, fff))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction({}, 3, 4, 5, fff))
+  assertSame(o, receiver)
+
+  var f = CreateFrozen({}, callTrap)
+  receiver = 333
+  assertEquals(42, f(11, 31))
+  assertSame(isStrict ? undefined : global_object, receiver)
+  var o = {f: f}
+  receiver = 333
+  assertEquals(42, o.f(10, 32))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, o["f"](9, 33))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o).f(8, 34))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, (1, o)["f"](7, 35))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(23, %Call(o, 11, 12, f))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(27, %Apply(f, o, [12, 13, 14], 1, 2))
+  assertSame(o, receiver)
+  receiver = 333
+  assertEquals(42, %_CallFunction(o, 18, 24, f))
+  assertSame(o, receiver)
+}
+
+TestCall(false, function(x, y) {
+  receiver = this
+  return x + y
+})
+
+TestCall(true, function(x, y) {
+  "use strict"
+  receiver = this
+  return x + y
+})
+
+TestCall(false, function() {
+  receiver = this
+  return arguments[0] + arguments[1]
+})
+
+TestCall(false, Proxy.createFunction(handler, function(x, y) {
+  receiver = this
+  return x + y
+}))
+
+TestCall(true, Proxy.createFunction(handler, function(x, y) {
+  "use strict"
+  receiver = this
+  return x + y
+}))
+
+TestCall(false, CreateFrozen(handler, function(x, y) {
+  receiver = this
+  return x + y
+}))
+
+
+
+// Using intrinsics as call traps.
+
+function TestCallIntrinsic(type, callTrap) {
+  var f = Proxy.createFunction({}, callTrap)
+  var x = f()
+  assertTrue(typeof x == type)
+}
+
+TestCallIntrinsic("boolean", Boolean)
+TestCallIntrinsic("number", Number)
+TestCallIntrinsic("string", String)
+TestCallIntrinsic("object", Object)
+TestCallIntrinsic("function", Function)
+
+
+
+// Throwing from call trap.
+
+function TestCallThrow(callTrap) {
+  var f = Proxy.createFunction({}, callTrap)
+  assertThrows(function(){ f(11) }, "myexn")
+  assertThrows(function(){ ({x: f}).x(11) }, "myexn")
+  assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
+  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+  assertThrows(function(){ %Call({}, f) }, "myexn")
+  assertThrows(function(){ %Call({}, 1, 2, f) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, f) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, 1, 2, f) }, "myexn")
+
+  var f = CreateFrozen({}, callTrap)
+  assertThrows(function(){ f(11) }, "myexn")
+  assertThrows(function(){ ({x: f}).x(11) }, "myexn")
+  assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
+  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+  assertThrows(function(){ %Call({}, f) }, "myexn")
+  assertThrows(function(){ %Call({}, 1, 2, f) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
+  assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, f) }, "myexn")
+  assertThrows(function(){ %_CallFunction({}, 1, 2, f) }, "myexn")
+}
+
+TestCallThrow(function() { throw "myexn" })
+TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
+
+
+
+// Construction (new).
+
+var prototype = {myprop: 0}
+var receiver
+
+var handlerWithPrototype = {
+  fix: function() { return { prototype: { value: prototype } }; },
+  get: function(r, n) {
+    if (n == "length") return 2;
+    assertEquals("prototype", n);
+    return prototype;
+  }
+}
+
+var handlerSansPrototype = {
+  fix: function() { return { length: { value: 2 } } },
+  get: function(r, n) {
+    if (n == "length") return 2;
+    assertEquals("prototype", n);
+    return undefined;
+  }
+}
+
+function ReturnUndef(x, y) {
+  "use strict";
+  receiver = this;
+  this.sum = x + y;
+}
+
+function ReturnThis(x, y) {
+  "use strict";
+  receiver = this;
+  this.sum = x + y;
+  return this;
+}
+
+function ReturnNew(x, y) {
+  "use strict";
+  receiver = this;
+  return {sum: x + y};
+}
+
+function ReturnNewWithProto(x, y) {
+  "use strict";
+  receiver = this;
+  var result = Object.create(prototype);
+  result.sum = x + y;
+  return result;
+}
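+
+// Quick sketch of the convention these traps encode (illustration only,
+// using the definitions above): when a construct trap returns an object,
+// that object becomes the result of `new`; the ReturnUndef/ReturnThis
+// variants matter for the derived construct trap tested further below.
+var ctorSketch = Proxy.createFunction(handlerSansPrototype, function() {},
+                                      ReturnNew)
+assertEquals(5, (new ctorSketch(2, 3)).sum)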
+
+function TestConstruct(proto, constructTrap) {
+  TestConstruct2(proto, constructTrap, handlerWithPrototype)
+  TestConstruct2(proto, constructTrap, handlerSansPrototype)
+}
+
+function TestConstruct2(proto, constructTrap, handler) {
+  var f = Proxy.createFunction(handler, function() {}, constructTrap)
+  var o = new f(11, 31)
+  assertEquals(undefined, receiver)
+  assertEquals(42, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+
+  var f = CreateFrozen(handler, function() {}, constructTrap)
+  var o = new f(11, 32)
+  assertEquals(undefined, receiver)
+  assertEquals(43, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+}
+
+TestConstruct(Object.prototype, ReturnNew)
+TestConstruct(prototype, ReturnNewWithProto)
+
+TestConstruct(Object.prototype, Proxy.createFunction(handler, ReturnNew))
+TestConstruct(prototype, Proxy.createFunction(handler, ReturnNewWithProto))
+
+TestConstruct(Object.prototype, CreateFrozen(handler, ReturnNew))
+TestConstruct(prototype, CreateFrozen(handler, ReturnNewWithProto))
+
+
+
+// Construction with derived construct trap.
+
+function TestConstructFromCall(proto, returnsThis, callTrap) {
+  TestConstructFromCall2(prototype, returnsThis, callTrap, handlerWithPrototype)
+  TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
+}
+
+function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
+  // TODO(rossberg): handling of prototype for derived construct trap will be
+  // fixed in a separate change. Commenting out checks below for now.
+  var f = Proxy.createFunction(handler, callTrap)
+  var o = new f(11, 31)
+  if (returnsThis) assertEquals(o, receiver)
+  assertEquals(42, o.sum)
+  // assertSame(proto, Object.getPrototypeOf(o))
+
+  var g = CreateFrozen(handler, callTrap)
+  // assertSame(f.prototype, g.prototype)
+  var o = new g(11, 32)
+  if (returnsThis) assertEquals(o, receiver)
+  assertEquals(43, o.sum)
+  // assertSame(proto, Object.getPrototypeOf(o))
+}
+
+TestConstructFromCall(Object.prototype, true, ReturnUndef)
+TestConstructFromCall(Object.prototype, true, ReturnThis)
+TestConstructFromCall(Object.prototype, false, ReturnNew)
+TestConstructFromCall(prototype, false, ReturnNewWithProto)
+
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnUndef))
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      Proxy.createFunction(handler, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      Proxy.createFunction(handler, ReturnNewWithProto))
+
+TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
+TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
+TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
+TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
+
+ReturnUndef.prototype = prototype
+ReturnThis.prototype = prototype
+ReturnNew.prototype = prototype
+ReturnNewWithProto.prototype = prototype
+
+TestConstructFromCall(prototype, true, ReturnUndef)
+TestConstructFromCall(prototype, true, ReturnThis)
+TestConstructFromCall(Object.prototype, false, ReturnNew)
+TestConstructFromCall(prototype, false, ReturnNewWithProto)
+
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnUndef))
+TestConstructFromCall(Object.prototype, true,
+                      Proxy.createFunction(handler, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      Proxy.createFunction(handler, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      Proxy.createFunction(handler, ReturnNewWithProto))
+
+TestConstructFromCall(prototype, true,
+                      Proxy.createFunction(handlerWithPrototype, ReturnUndef))
+TestConstructFromCall(prototype, true,
+                      Proxy.createFunction(handlerWithPrototype, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      Proxy.createFunction(handlerWithPrototype, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      Proxy.createFunction(handlerWithPrototype,
+                                           ReturnNewWithProto))
+
+TestConstructFromCall(prototype, true,
+                      CreateFrozen(handlerWithPrototype, ReturnUndef))
+TestConstructFromCall(prototype, true,
+                      CreateFrozen(handlerWithPrototype, ReturnThis))
+TestConstructFromCall(Object.prototype, false,
+                      CreateFrozen(handlerWithPrototype, ReturnNew))
+TestConstructFromCall(prototype, false,
+                      CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
+
+
+
+// Throwing from the construct trap.
+
+function TestConstructThrow(trap) {
+  TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
+                                           trap))
+  TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
+                                           function() {},
+                                           trap))
+}
+
+function TestConstructThrow2(f) {
+  assertThrows(function(){ new f(11) }, "myexn")
+  Object.freeze(f)
+  assertThrows(function(){ new f(11) }, "myexn")
+}
+
+TestConstructThrow(function() { throw "myexn" })
+TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
+
+
+
+// Using function proxies as getters and setters.
+
+var value
+var receiver
+
+function TestAccessorCall(getterCallTrap, setterCallTrap) {
+  var handler = { fix: function() { return {} } }
+  var pgetter = Proxy.createFunction(handler, getterCallTrap)
+  var psetter = Proxy.createFunction(handler, setterCallTrap)
+
+  var o = {}
+  var oo = Object.create(o)
+  Object.defineProperty(o, "a", {get: pgetter, set: psetter})
+  Object.defineProperty(o, "b", {get: pgetter})
+  Object.defineProperty(o, "c", {set: psetter})
+  Object.defineProperty(o, "3", {get: pgetter, set: psetter})
+  Object.defineProperty(oo, "a", {value: 43})
+
+  receiver = ""
+  assertEquals(42, o.a)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(42, o.b)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(undefined, o.c)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, o["a"])
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(42, o[3])
+  assertSame(o, receiver)
+
+  receiver = ""
+  assertEquals(43, oo.a)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, oo.b)
+  assertSame(oo, receiver)
+  receiver = ""
+  assertEquals(undefined, oo.c)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(43, oo["a"])
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, oo[3])
+  assertSame(oo, receiver)
+
+  receiver = ""
+  assertEquals(50, o.a = 50)
+  assertSame(o, receiver)
+  assertEquals(50, value)
+  receiver = ""
+  assertEquals(51, o.b = 51)
+  assertEquals("", receiver)
+  assertEquals(50, value)  // no setter
+  assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
+  receiver = ""
+  assertEquals(52, o.c = 52)
+  assertSame(o, receiver)
+  assertEquals(52, value)
+  receiver = ""
+  assertEquals(53, o["a"] = 53)
+  assertSame(o, receiver)
+  assertEquals(53, value)
+  receiver = ""
+  assertEquals(54, o[3] = 54)
+  assertSame(o, receiver)
+  assertEquals(54, value)
+
+  value = 0
+  receiver = ""
+  assertEquals(60, oo.a = 60)
+  assertEquals("", receiver)
+  assertEquals(0, value)  // oo has own 'a'
+  assertEquals(61, oo.b = 61)
+  assertSame("", receiver)
+  assertEquals(0, value)  // no setter
+  assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
+  receiver = ""
+  assertEquals(62, oo.c = 62)
+  assertSame(oo, receiver)
+  assertEquals(62, value)
+  receiver = ""
+  assertEquals(63, oo["c"] = 63)
+  assertSame(oo, receiver)
+  assertEquals(63, value)
+  receiver = ""
+  assertEquals(64, oo[3] = 64)
+  assertSame(oo, receiver)
+  assertEquals(64, value)
+}
+
+TestAccessorCall(
+  function() { receiver = this; return 42 },
+  function(x) { receiver = this; value = x }
+)
+
+TestAccessorCall(
+  function() { "use strict"; receiver = this; return 42 },
+  function(x) { "use strict"; receiver = this; value = x }
+)
+
+TestAccessorCall(
+  Proxy.createFunction({}, function() { receiver = this; return 42 }),
+  Proxy.createFunction({}, function(x) { receiver = this; value = x })
+)
+
+TestAccessorCall(
+  CreateFrozen({}, function() { receiver = this; return 42 }),
+  CreateFrozen({}, function(x) { receiver = this; value = x })
+)
+
+
+
+// TODO(rossberg): Ultimately, I want to have the following test function
+// run through, but it currently fails in so many cases (some not even
+// involving proxies) that I'm leaving it for later...
+/*
+function TestCalls() {
+  var handler = {
+    get: function(r, k) {
+      return k == "length" ? 2 : Function.prototype[k]
+    }
+  }
+  var bind = Function.prototype.bind
+  var o = {}
+
+  var traps = [
+    function(x, y) {
+      return {receiver: this, result: x + y, strict: false}
+    },
+    function(x, y) { "use strict";
+      return {receiver: this, result: x + y, strict: true}
+    },
+    function() {
+      var x = arguments[0], y = arguments[1]
+      return {receiver: this, result: x + y, strict: false}
+    },
+    Proxy.createFunction(handler, function(x, y) {
+      return {receiver: this, result: x + y, strict: false}
+    }),
+    Proxy.createFunction(handler, function() {
+      var x = arguments[0], y = arguments[1]
+      return {receiver: this, result: x + y, strict: false}
+    }),
+    Proxy.createFunction(handler, function(x, y) { "use strict"
+      return {receiver: this, result: x + y, strict: true}
+    }),
+    CreateFrozen(handler, function(x, y) {
+      return {receiver: this, result: x + y, strict: false}
+    }),
+    CreateFrozen(handler, function(x, y) { "use strict"
+      return {receiver: this, result: x + y, strict: true}
+    }),
+  ]
+  var creates = [
+    function(trap) { return trap },
+    function(trap) { return CreateFrozen({}, trap) },
+    function(trap) { return Proxy.createFunction(handler, trap) },
+    function(trap) {
+      return Proxy.createFunction(handler, CreateFrozen({}, trap))
+    },
+    function(trap) {
+      return Proxy.createFunction(handler,
+                                  Proxy.createFunction(handler, trap))
+    },
+  ]
+  var binds = [
+    function(f, o, x, y) { return f },
+    function(f, o, x, y) { return bind.call(f, o) },
+    function(f, o, x, y) { return bind.call(f, o, x) },
+    function(f, o, x, y) { return bind.call(f, o, x, y) },
+    function(f, o, x, y) { return bind.call(f, o, x, y, 5) },
+    function(f, o, x, y) { return bind.call(bind.call(f, o), {}, x, y) },
+    function(f, o, x, y) { return bind.call(bind.call(f, o, x), {}, y) },
+    function(f, o, x, y) { return bind.call(bind.call(f, o, x, y), {}, 5) },
+  ]
+  var calls = [
+    function(f, x, y) { return f(x, y) },
+    function(f, x, y) { var g = f; return g(x, y) },
+    function(f, x, y) { with ({}) return f(x, y) },
+    function(f, x, y) { var g = f; with ({}) return g(x, y) },
+    function(f, x, y, o) { with (o) return f(x, y) },
+    function(f, x, y, o) { return f.call(o, x, y) },
+    function(f, x, y, o) { return f.apply(o, [x, y]) },
+    function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
+    function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
+    function(f, x, y, o) { return %_CallFunction(o, x, y, f) },
+    function(f, x, y, o) { return %Call(o, x, y, f) },
+    function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
+    function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
+    function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
+    function(f, x, y, o) { if (typeof o == "object") return o["f"](x, y) },
+    function(f, x, y, o) { if (typeof o == "object") return (1, o).f(x, y) },
+    function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
+  ]
+  var receivers = [o, global_object, undefined, null, 2, "bla", true]
+  var expectedNonStricts = [o, global_object, global_object, global_object]
+
+  for (var t = 0; t < traps.length; ++t) {
+    for (var i = 0; i < creates.length; ++i) {
+      for (var j = 0; j < binds.length; ++j) {
+        for (var k = 0; k < calls.length; ++k) {
+          for (var m = 0; m < receivers.length; ++m) {
+            for (var n = 0; n < receivers.length; ++n) {
+              var bound = receivers[m]
+              var receiver = receivers[n]
+              var func = binds[j](creates[i](traps[t]), bound, 31, 11)
+              var expected = j > 0 ? bound : receiver
+              var expectedNonStrict = expectedNonStricts[j > 0 ? m : n]
+              o.f = func
+              global_object.f = func
+              var x = calls[k](func, 11, 31, receiver)
+              if (x !== undefined) {
+                assertEquals(42, x.result)
+                if (calls[k].length < 4)
+                  assertSame(x.strict ? undefined : global_object, x.receiver)
+                else if (x.strict)
+                  assertSame(expected, x.receiver)
+                else if (expectedNonStrict === undefined)
+                  assertSame(expected, x.receiver.valueOf())
+                else
+                  assertSame(expectedNonStrict, x.receiver)
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+TestCalls()
+*/
diff --git a/test/mjsunit/harmony/proxies-hash.js b/test/mjsunit/harmony/proxies-hash.js
new file mode 100644
index 0000000..abfc0f5
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-hash.js
@@ -0,0 +1,122 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies --harmony-collections
+
+
+// Helper.
+
+function TestWithProxies(test, construct, handler) {
+  test(construct, handler, Proxy.create)
+  test(construct, handler, function(h) {
+    return Proxy.createFunction(h, function() {})
+  })
+}
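+
+// A minimal sketch of the property exercised below (illustration only): a
+// proxy used as a collection key must keep its identity hash when it gets
+// fixed, e.g. by freezing.
+var hashSketch = Proxy.create({fix: function() { return {} }})
+var hashSketchSet = new Set
+hashSketchSet.add(hashSketch)
+Object.freeze(hashSketch)
+assertTrue(hashSketchSet.has(hashSketch))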
+
+
+// Sets.
+
+function TestSet(construct, fix) {
+  TestWithProxies(TestSet2, construct, fix)
+}
+
+function TestSet2(construct, fix, create) {
+  var handler = {fix: function() { return {} }}
+  var p1 = create(handler)
+  var p2 = create(handler)
+  var p3 = create(handler)
+  fix(p3)
+
+  var s = construct();
+  s.add(p1);
+  s.add(p2);
+  assertTrue(s.has(p1));
+  assertTrue(s.has(p2));
+  assertFalse(s.has(p3));
+
+  fix(p1)
+  fix(p2)
+  assertTrue(s.has(p1));
+  assertTrue(s.has(p2));
+  assertFalse(s.has(p3));
+
+  s.delete(p2);
+  assertTrue(s.has(p1));
+  assertFalse(s.has(p2));
+  assertFalse(s.has(p3));
+}
+
+TestSet(Set, Object.seal)
+TestSet(Set, Object.freeze)
+TestSet(Set, Object.preventExtensions)
+
+
+// Maps and weak maps.
+
+function TestMap(construct, fix) {
+  TestWithProxies(TestMap2, construct, fix)
+}
+
+function TestMap2(construct, fix, create) {
+  var handler = {fix: function() { return {} }}
+  var p1 = create(handler)
+  var p2 = create(handler)
+  var p3 = create(handler)
+  fix(p3)
+
+  var m = construct();
+  m.set(p1, 123);
+  m.set(p2, 321);
+  assertTrue(m.has(p1));
+  assertTrue(m.has(p2));
+  assertFalse(m.has(p3));
+  assertSame(123, m.get(p1));
+  assertSame(321, m.get(p2));
+
+  fix(p1)
+  fix(p2)
+  assertTrue(m.has(p1));
+  assertTrue(m.has(p2));
+  assertFalse(m.has(p3));
+  assertSame(123, m.get(p1));
+  assertSame(321, m.get(p2));
+
+  m.delete(p2);
+  assertTrue(m.has(p1));
+  assertFalse(m.has(p2));
+  assertFalse(m.has(p3));
+  assertSame(123, m.get(p1));
+  assertSame(undefined, m.get(p2));
+}
+
+TestMap(Map, Object.seal)
+TestMap(Map, Object.freeze)
+TestMap(Map, Object.preventExtensions)
+
+TestMap(WeakMap, Object.seal)
+TestMap(WeakMap, Object.freeze)
+TestMap(WeakMap, Object.preventExtensions)
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
index 3c4e5f6..50c8613 100644
--- a/test/mjsunit/harmony/proxies.js
+++ b/test/mjsunit/harmony/proxies.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,70 +28,167 @@
 // Flags: --harmony-proxies
 
 
-// TODO(rossberg): for-in for proxies not implemented.
-// TODO(rossberg): inheritance from proxies not implemented.
-// TODO(rossberg): function proxies as constructors not implemented.
-
-
 // Helper.
 
-function TestWithProxies(test, handler) {
-  test(handler, Proxy.create)
-  test(handler, function(h) {return Proxy.createFunction(h, function() {})})
+function TestWithProxies(test, x, y, z) {
+  test(Proxy.create, x, y, z)
+  test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
 }
 
 
-// Getters.
+
+// Getting property descriptors (Object.getOwnPropertyDescriptor).
+
+var key
+
+function TestGetOwnProperty(handler) {
+  TestWithProxies(TestGetOwnProperty2, handler)
+}
+
+function TestGetOwnProperty2(create, handler) {
+  var p = create(handler)
+  assertEquals(42, Object.getOwnPropertyDescriptor(p, "a").value)
+  assertEquals("a", key)
+  assertEquals(42, Object.getOwnPropertyDescriptor(p, 99).value)
+  assertEquals("99", key)
+}
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    key = k
+    return {value: 42, configurable: true}
+  }
+})
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k
+    return {value: 42, configurable: true}
+  }
+})
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    key = k
+    return {get value() { return 42 }, get configurable() { return true }}
+  }
+})
+
+TestGetOwnProperty(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { key = k; return {value: 42, configurable: true} }
+  }
+}))
+
+
+function TestGetOwnPropertyThrow(handler) {
+  TestWithProxies(TestGetOwnPropertyThrow2, handler)
+}
+
+function TestGetOwnPropertyThrow2(create, handler) {
+  var p = create(handler)
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, 77) }, "myexn")
+}
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return {get value() { throw "myexn" }}
+  }
+})
+
+TestGetOwnPropertyThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Getters (dot, brackets).
+
+var key
 
 function TestGet(handler) {
   TestWithProxies(TestGet2, handler)
 }
 
-function TestGet2(handler, create) {
+function TestGet2(create, handler) {
   var p = create(handler)
   assertEquals(42, p.a)
+  assertEquals("a", key)
   assertEquals(42, p["b"])
+  assertEquals("b", key)
+  assertEquals(42, p[99])
+  assertEquals("99", key)
+  assertEquals(42, (function(n) { return p[n] })("c"))
+  assertEquals("c", key)
+  assertEquals(42, (function(n) { return p[n] })(101))
+  assertEquals("101", key)
 
-  // TODO(rossberg): inheritance from proxies not yet implemented.
-  // var o = Object.create(p, {x: {value: 88}})
-  // assertEquals(42, o.a)
-  // assertEquals(42, o["b"])
-  // assertEquals(88, o.x)
-  // assertEquals(88, o["x"])
+  var o = Object.create(p, {x: {value: 88}})
+  assertEquals(42, o.a)
+  assertEquals("a", key)
+  assertEquals(42, o["b"])
+  assertEquals("b", key)
+  assertEquals(42, o[99])
+  assertEquals("99", key)
+  assertEquals(88, o.x)
+  assertEquals(88, o["x"])
+  assertEquals(42, (function(n) { return o[n] })("c"))
+  assertEquals("c", key)
+  assertEquals(42, (function(n) { return o[n] })(101))
+  assertEquals("101", key)
+  assertEquals(88, (function(n) { return o[n] })("x"))
 }
 
 TestGet({
-  get: function(r, k) { return 42 }
+  get: function(r, k) { key = k; return 42 }
 })
 
 TestGet({
   get: function(r, k) { return this.get2(r, k) },
-  get2: function(r, k) { return 42 }
+  get2: function(r, k) { key = k; return 42 }
 })
 
 TestGet({
-  getPropertyDescriptor: function(k) { return {value: 42} }
+  getPropertyDescriptor: function(k) { key = k; return {value: 42} }
 })
 
 TestGet({
   getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
-  getPropertyDescriptor2: function(k) { return {value: 42} }
+  getPropertyDescriptor2: function(k) { key = k; return {value: 42} }
 })
 
 TestGet({
   getPropertyDescriptor: function(k) {
+    key = k;
     return {get value() { return 42 }}
   }
 })
 
 TestGet({
   get: undefined,
-  getPropertyDescriptor: function(k) { return {value: 42} }
+  getPropertyDescriptor: function(k) { key = k; return {value: 42} }
 })
 
 TestGet(Proxy.create({
   get: function(pr, pk) {
-    return function(r, k) { return 42 }
+    return function(r, k) { key = k; return 42 }
   }
 }))
 
@@ -100,14 +197,43 @@
   TestWithProxies(TestGetCall2, handler)
 }
 
-function TestGetCall2(handler, create) {
+function TestGetCall2(create, handler) {
   var p = create(handler)
   assertEquals(55, p.f())
+  assertEquals(55, p["f"]())
   assertEquals(55, p.f("unused", "arguments"))
   assertEquals(55, p.f.call(p))
+  assertEquals(55, p["f"].call(p))
+  assertEquals(55, p[101].call(p))
   assertEquals(55, p.withargs(45, 5))
   assertEquals(55, p.withargs.call(p, 11, 22))
+  assertEquals(55, (function(n) { return p[n]() })("f"))
+  assertEquals(55, (function(n) { return p[n].call(p) })("f"))
+  assertEquals(55, (function(n) { return p[n](15, 20) })("withargs"))
+  assertEquals(55, (function(n) { return p[n].call(p, 13, 21) })("withargs"))
   assertEquals("6655", "66" + p)  // calls p.toString
+
+  var o = Object.create(p, {g: {value: function(x) { return x + 88 }}})
+  assertEquals(55, o.f())
+  assertEquals(55, o["f"]())
+  assertEquals(55, o.f("unused", "arguments"))
+  assertEquals(55, o.f.call(o))
+  assertEquals(55, o.f.call(p))
+  assertEquals(55, o["f"].call(p))
+  assertEquals(55, o[101].call(p))
+  assertEquals(55, o.withargs(45, 5))
+  assertEquals(55, o.withargs.call(p, 11, 22))
+  assertEquals(90, o.g(2))
+  assertEquals(91, o.g.call(o, 3))
+  assertEquals(92, o.g.call(p, 4))
+  assertEquals(55, (function(n) { return o[n]() })("f"))
+  assertEquals(55, (function(n) { return o[n].call(o) })("f"))
+  assertEquals(55, (function(n) { return o[n](15, 20) })("withargs"))
+  assertEquals(55, (function(n) { return o[n].call(o, 13, 21) })("withargs"))
+  assertEquals(93, (function(n) { return o[n](5) })("g"))
+  assertEquals(94, (function(n) { return o[n].call(o, 6) })("g"))
+  assertEquals(95, (function(n) { return o[n].call(p, 7) })("g"))
+  assertEquals("6655", "66" + o)  // calls o.toString
 }
 
 TestGetCall({
@@ -168,10 +294,20 @@
   TestWithProxies(TestGetThrow2, handler)
 }
 
-function TestGetThrow2(handler, create) {
+function TestGetThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ p.a }, "myexn")
   assertThrows(function(){ p["b"] }, "myexn")
+  assertThrows(function(){ p[3] }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] })("c") }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] })(99) }, "myexn")
+
+  var o = Object.create(p, {x: {value: 88}, '4': {value: 89}})
+  assertThrows(function(){ o.a }, "myexn")
+  assertThrows(function(){ o["b"] }, "myexn")
+  assertThrows(function(){ o[3] }, "myexn")
+  assertThrows(function(){ (function(n) { o[n] })("c") }, "myexn")
+  assertThrows(function(){ (function(n) { o[n] })(99) }, "myexn")
 }
 
 TestGetThrow({
@@ -220,11 +356,11 @@
 var key
 var val
 
-function TestSet(handler, create) {
+function TestSet(handler) {
   TestWithProxies(TestSet2, handler)
 }
 
-function TestSet2(handler, create) {
+function TestSet2(create, handler) {
   var p = create(handler)
   assertEquals(42, p.a = 42)
   assertEquals("a", key)
@@ -232,6 +368,16 @@
   assertEquals(43, p["b"] = 43)
   assertEquals("b", key)
   assertEquals(43, val)
+  assertEquals(44, p[77] = 44)
+  assertEquals("77", key)
+  assertEquals(44, val)
+
+  assertEquals(45, (function(n) { return p[n] = 45 })("c"))
+  assertEquals("c", key)
+  assertEquals(45, val)
+  assertEquals(46, (function(n) { return p[n] = 46 })(99))
+  assertEquals("99", key)
+  assertEquals(46, val)
 }
 
 TestSet({
@@ -304,15 +450,17 @@
 }))
 
 
-
-function TestSetThrow(handler, create) {
+function TestSetThrow(handler) {
   TestWithProxies(TestSetThrow2, handler)
 }
 
-function TestSetThrow2(handler, create) {
+function TestSetThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ p.a = 42 }, "myexn")
   assertThrows(function(){ p["b"] = 42 }, "myexn")
+  assertThrows(function(){ p[22] = 42 }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] = 45 })("c") }, "myexn")
+  assertThrows(function(){ (function(n) { p[n] = 46 })(99) }, "myexn")
 }
 
 TestSetThrow({
@@ -424,6 +572,124 @@
 }))
 
 
+var key
+var val
+
+function TestSetForDerived(handler) {
+  TestWithProxies(TestSetForDerived2, handler)
+}
+
+function TestSetForDerived2(create, handler) {
+  var p = create(handler)
+  var o = Object.create(p, {x: {value: 88, writable: true},
+                            '1': {value: 89, writable: true}})
+
+  key = ""
+  assertEquals(48, o.x = 48)
+  assertEquals("", key)  // trap not invoked
+  assertEquals(48, o.x)
+
+  assertEquals(47, o[1] = 47)
+  assertEquals("", key)  // trap not invoked
+  assertEquals(47, o[1])
+
+  assertEquals(49, o.y = 49)
+  assertEquals("y", key)
+  assertEquals(49, o.y)
+
+  assertEquals(50, o[2] = 50)
+  assertEquals("2", key)
+  assertEquals(50, o[2])
+
+  assertEquals(44, o.p_writable = 44)
+  assertEquals("p_writable", key)
+  assertEquals(44, o.p_writable)
+
+  assertEquals(45, o.p_nonwritable = 45)
+  assertEquals("p_nonwritable", key)
+  assertEquals(45, o.p_nonwritable)
+
+  assertEquals(46, o.p_setter = 46)
+  assertEquals("p_setter", key)
+  assertEquals(46, val)  // written to parent
+  assertFalse(Object.prototype.hasOwnProperty.call(o, "p_setter"))
+
+  val = ""
+  assertEquals(47, o.p_nosetter = 47)
+  assertEquals("p_nosetter", key)
+  assertEquals("", val)  // not written at all
+  assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nosetter"));
+
+  key = ""
+  assertThrows(function(){ "use strict"; o.p_nosetter = 50 }, TypeError)
+  assertEquals("p_nosetter", key)
+  assertEquals("", val)  // not written at all
+
+  assertThrows(function(){ o.p_nonconf = 53 }, TypeError)
+  assertEquals("p_nonconf", key)
+
+  assertThrows(function(){ o.p_throw = 51 }, "myexn")
+  assertEquals("p_throw", key)
+
+  assertThrows(function(){ o.p_setterthrow = 52 }, "myexn")
+  assertEquals("p_setterthrow", key)
+}
+
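+// The handler below covers each descriptor shape the derived set path must
+// handle: writable and non-writable data properties, accessors with and
+// without a setter, an empty (hence non-configurable) descriptor, and traps
+// or setters that throw.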
+TestSetForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k;
+    switch (k) {
+      case "p_writable": return {writable: true, configurable: true}
+      case "p_nonwritable": return {writable: false, configurable: true}
+      case "p_setter":return {set: function(x) { val = x }, configurable: true}
+      case "p_nosetter": return {get: function() { return 1 }, configurable: true}
+      case "p_nonconf":return {}
+      case "p_throw": throw "myexn"
+      case "p_setterthrow": return {set: function(x) { throw "myexn" }}
+      default: return undefined
+    }
+  }
+})
+
+
+// Evil proxy-induced side-effects shouldn't crash.
+// TODO(rossberg): proper behaviour isn't really spec'ed yet, so ignore results.
+
+TestWithProxies(function(create) {
+  var calls = 0
+  var handler = {
+    getPropertyDescriptor: function() {
+      ++calls
+      return (calls % 2 == 1)
+        ? {get: function() { return 5 }, configurable: true}
+        : {set: function() { return false }, configurable: true}
+    }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Make proxy prototype property read-only after CanPut check.
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestWithProxies(function(create) {
+  var handler = {
+    getPropertyDescriptor: function() {
+      Object.defineProperty(o, "x", {get: function() { return 5 }});
+      return {set: function() {}}
+    }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Make object property read-only after CanPut check.
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+
+
+// TODO(rossberg): TestSetReject, returning false
+// TODO(rossberg): TestGetProperty, TestSetProperty
+
+
 
 // Property definition (Object.defineProperty and Object.defineProperties).
 
@@ -434,7 +700,7 @@
   TestWithProxies(TestDefine2, handler)
 }
 
-function TestDefine2(handler, create) {
+function TestDefine2(create, handler) {
   var p = create(handler)
   assertEquals(p, Object.defineProperty(p, "a", {value: 44}))
   assertEquals("a", key)
@@ -453,6 +719,12 @@
   assertEquals(46, desc.value)
   assertEquals(false, desc.enumerable)
 
+  assertEquals(p, Object.defineProperty(p, 101, {value: 47, enumerable: false}))
+  assertEquals("101", key)
+  assertEquals(2, Object.getOwnPropertyNames(desc).length)
+  assertEquals(47, desc.value)
+  assertEquals(false, desc.enumerable)
+
   var attributes = {configurable: true, mine: 66, minetoo: 23}
   assertEquals(p, Object.defineProperty(p, "d", attributes))
   assertEquals("d", key)
@@ -474,20 +746,20 @@
   assertEquals("zzz", key)
   assertEquals(0, Object.getOwnPropertyNames(desc).length)
 
-// TODO(rossberg): This test requires for-in on proxies.
-//  var d = create({
-//    get: function(r, k) { return (k === "value") ? 77 : void 0 },
-//    getOwnPropertyNames: function() { return ["value"] }
-//  })
-//  assertEquals(1, Object.getOwnPropertyNames(d).length)
-//  assertEquals(77, d.value)
-//  assertEquals(p, Object.defineProperty(p, "p", d))
-//  assertEquals("p", key)
-//  assertEquals(1, Object.getOwnPropertyNames(desc).length)
-//  assertEquals(77, desc.value)
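+  // Enabled now that for-in on proxies works via the enumerate trap.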
+  var d = create({
+    get: function(r, k) { return (k === "value") ? 77 : void 0 },
+    getOwnPropertyNames: function() { return ["value"] },
+    enumerate: function() { return ["value"] }
+  })
+  assertEquals(1, Object.getOwnPropertyNames(d).length)
+  assertEquals(77, d.value)
+  assertEquals(p, Object.defineProperty(p, "p", d))
+  assertEquals("p", key)
+  assertEquals(1, Object.getOwnPropertyNames(desc).length)
+  assertEquals(77, desc.value)
 
   var props = {
-    'bla': {},
+    '11': {},
     blub: {get: function() { return true }},
     '': {get value() { return 20 }},
     last: {value: 21, configurable: true, mine: "eyes"}
@@ -524,21 +796,21 @@
   TestWithProxies(TestDefineThrow2, handler)
 }
 
-function TestDefineThrow2(handler, create) {
+function TestDefineThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.defineProperty(p, "a", {value: 44})}, "myexn")
+  assertThrows(function(){ Object.defineProperty(p, 0, {value: 44})}, "myexn")
 
-// TODO(rossberg): These tests require for-in on proxies.
-//  var d1 = create({
-//    get: function(r, k) { throw "myexn" },
-//    getOwnPropertyNames: function() { return ["value"] }
-//  })
-//  assertThrows(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
-//  var d2 = create({
-//    get: function(r, k) { return 77 },
-//    getOwnPropertyNames: function() { throw "myexn" }
-//  })
-//  assertThrows(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
+  var d1 = create({
+    get: function(r, k) { throw "myexn" },
+    getOwnPropertyNames: function() { return ["value"] }
+  })
+  assertThrows(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
+  var d2 = create({
+    get: function(r, k) { return 77 },
+    getOwnPropertyNames: function() { throw "myexn" }
+  })
+  assertThrows(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
 
   var props = {bla: {get value() { throw "otherexn" }}}
   assertThrows(function(){ Object.defineProperties(p, props) }, "otherexn")
@@ -573,12 +845,14 @@
   TestWithProxies(TestDelete2, handler)
 }
 
-function TestDelete2(handler, create) {
+function TestDelete2(create, handler) {
   var p = create(handler)
   assertEquals(true, delete p.a)
   assertEquals("a", key)
   assertEquals(true, delete p["b"])
   assertEquals("b", key)
+  assertEquals(true, delete p[1])
+  assertEquals("1", key)
 
   assertEquals(false, delete p.z1)
   assertEquals("z1", key)
@@ -591,6 +865,8 @@
     assertEquals("c", key)
     assertEquals(true, delete p["d"])
     assertEquals("d", key)
+    assertEquals(true, delete p[2])
+    assertEquals("2", key)
 
     assertThrows(function(){ delete p.z3 }, TypeError)
     assertEquals("z3", key)
@@ -619,15 +895,17 @@
   TestWithProxies(TestDeleteThrow2, handler)
 }
 
-function TestDeleteThrow2(handler, create) {
+function TestDeleteThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ delete p.a }, "myexn")
   assertThrows(function(){ delete p["b"] }, "myexn");
+  assertThrows(function(){ delete p[3] }, "myexn");
 
   (function() {
     "use strict"
     assertThrows(function(){ delete p.c }, "myexn")
     assertThrows(function(){ delete p["d"] }, "myexn")
+    assertThrows(function(){ delete p[4] }, "myexn");
   })()
 }
 
@@ -658,7 +936,7 @@
   TestWithProxies(TestDescriptor2, handler)
 }
 
-function TestDescriptor2(handler, create) {
+function TestDescriptor2(create, handler) {
   var p = create(handler)
   var descs = [
     {configurable: true},
@@ -697,7 +975,7 @@
   TestWithProxies(TestDescriptorThrow2, handler)
 }
 
-function TestDescriptorThrow2(handler, create) {
+function TestDescriptorThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
 }
@@ -721,7 +999,7 @@
   TestWithProxies(TestComparison2, eq)
 }
 
-function TestComparison2(eq, create) {
+function TestComparison2(create, eq) {
   var p1 = create({})
   var p2 = create({})
 
@@ -764,7 +1042,7 @@
   TestWithProxies(TestIn2, handler)
 }
 
-function TestIn2(handler, create) {
+function TestIn2(create, handler) {
   var p = create(handler)
   assertTrue("a" in p)
   assertEquals("a", key)
@@ -778,6 +1056,7 @@
   assertEquals(0, ("zzz" in p) ? 2 : 0)
   assertEquals(2, !("zzz" in p) ? 2 : 0)
 
+  // Test compilation in conditionals.
   if ("b" in p) {
   } else {
     assertTrue(false)
@@ -830,7 +1109,7 @@
 })
 
 TestIn({
-  get: undefined,
+  has: undefined,
   getPropertyDescriptor: function(k) {
     key = k; return k < "z" ? {value: 42} : void 0
   }
@@ -847,9 +1126,10 @@
   TestWithProxies(TestInThrow2, handler)
 }
 
-function TestInThrow2(handler, create) {
+function TestInThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ return "a" in o }, "myexn")
+  assertThrows(function(){ return 99 in o }, "myexn")
   assertThrows(function(){ return !("a" in o) }, "myexn")
   assertThrows(function(){ return ("a" in o) ? 2 : 3 }, "myexn")
   assertThrows(function(){ if ("b" in o) {} }, "myexn")
@@ -876,7 +1156,7 @@
 })
 
 TestInThrow({
-  get: undefined,
+  has: undefined,
   getPropertyDescriptor: function(k) { throw "myexn" }
 })
 
@@ -891,6 +1171,158 @@
 }))
 
 
+function TestInForDerived(handler) {
+  TestWithProxies(TestInForDerived2, handler)
+}
+
+function TestInForDerived2(create, handler) {
+  var p = create(handler)
+  var o = Object.create(p)
+
+  assertTrue("a" in o)
+  assertEquals("a", key)
+  assertTrue(99 in o)
+  assertEquals("99", key)
+  assertFalse("z" in o)
+  assertEquals("z", key)
+
+  assertEquals(2, ("a" in o) ? 2 : 0)
+  assertEquals(0, !("a" in o) ? 2 : 0)
+  assertEquals(0, ("zzz" in o) ? 2 : 0)
+  assertEquals(2, !("zzz" in o) ? 2 : 0)
+
+  if ("b" in o) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("b", key)
+
+  if ("zz" in o) {
+    assertTrue(false)
+  }
+  assertEquals("zz", key)
+
+  if (!("c" in o)) {
+    assertTrue(false)
+  }
+  assertEquals("c", key)
+
+  if (!("zzz" in o)) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("zzz", key)
+}
+
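+// For an object that inherits from a proxy, 'in' consults the proxy's
+// getPropertyDescriptor trap, which is all that the handlers below define.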
+TestInForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k;
+    return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
+  }
+})
+
+/* TODO(rossberg): this will work once we implement the newest proposal
+ * regarding default traps for getPropertyDescriptor.
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k;
+    return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
+  }
+})
+*/
+
+TestInForDerived(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) {
+      key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+    }
+  }
+}))
+
+
+
+// Property descriptor conversion.
+
+var descget
+
+function TestDescriptorGetOrder(handler) {
+  var p = Proxy.create(handler)
+  var o = Object.create(p, {b: {value: 0}})
+  TestDescriptorGetOrder2(function(n) { return p[n] }, "vV")
+  TestDescriptorGetOrder2(function(n) { return n in p }, "")
+  TestDescriptorGetOrder2(function(n) { return o[n] }, "vV")
+  TestDescriptorGetOrder2(function(n) { return n in o }, "eEcCvVwWgs")
+}
+
+function TestDescriptorGetOrder2(f, access) {
+  descget = ""
+  assertTrue(f("a"))
+  assertEquals(access, descget)
+  descget = ""
+  assertTrue(f(99))
+  assertEquals(access, descget)
+  descget = ""
+  assertFalse(!!f("z"))
+  assertEquals("", descget)
+}
+
+TestDescriptorGetOrder({
+  getPropertyDescriptor: function(k) {
+    if (k >= "z") return void 0
+    // Return a proxy as property descriptor, so that we can log accesses.
+    return Proxy.create({
+      get: function(r, attr) {
+        descget += attr[0].toUpperCase()
+        return true
+      },
+      has: function(attr) {
+        descget += attr[0]
+        switch (attr) {
+          case "writable":
+          case "enumerable":
+          case "configurable":
+          case "value":
+            return true
+          case "get":
+          case "set":
+            return false
+          default:
+            assertUnreachable()
+        }
+      }
+    })
+  }
+})
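+
+// In the access strings above, a lowercase letter logs a 'has' probe of a
+// descriptor attribute and the matching uppercase letter the subsequent
+// 'get': "eEcCvVwWgs" probes and reads enumerable, configurable, value and
+// writable, then probes get and set, which are absent.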
+
+
 
 // Own Properties (Object.prototype.hasOwnProperty).
 
@@ -900,7 +1332,7 @@
   TestWithProxies(TestHasOwn2, handler)
 }
 
-function TestHasOwn2(handler, create) {
+function TestHasOwn2(create, handler) {
   var p = create(handler)
   assertTrue(Object.prototype.hasOwnProperty.call(p, "a"))
   assertEquals("a", key)
@@ -958,7 +1390,7 @@
   TestWithProxies(TestHasOwnThrow2, handler)
 }
 
-function TestHasOwnThrow2(handler, create) {
+function TestHasOwnThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, "a")},
     "myexn")
@@ -1005,84 +1437,173 @@
 
 // Instanceof (instanceof)
 
-function TestInstanceof() {
-  var o = {}
+function TestProxyInstanceof() {
+  var o1 = {}
   var p1 = Proxy.create({})
-  var p2 = Proxy.create({}, o)
+  var p2 = Proxy.create({}, o1)
   var p3 = Proxy.create({}, p2)
+  var o2 = Object.create(p2)
 
   var f0 = function() {}
-  f0.prototype = o
+  f0.prototype = o1
   var f1 = function() {}
   f1.prototype = p1
   var f2 = function() {}
   f2.prototype = p2
+  var f3 = function() {}
+  f3.prototype = o2
 
-  assertTrue(o instanceof Object)
-  assertFalse(o instanceof f0)
-  assertFalse(o instanceof f1)
-  assertFalse(o instanceof f2)
+  assertTrue(o1 instanceof Object)
+  assertFalse(o1 instanceof f0)
+  assertFalse(o1 instanceof f1)
+  assertFalse(o1 instanceof f2)
+  assertFalse(o1 instanceof f3)
   assertFalse(p1 instanceof Object)
   assertFalse(p1 instanceof f0)
   assertFalse(p1 instanceof f1)
   assertFalse(p1 instanceof f2)
+  assertFalse(p1 instanceof f3)
   assertTrue(p2 instanceof Object)
   assertTrue(p2 instanceof f0)
   assertFalse(p2 instanceof f1)
   assertFalse(p2 instanceof f2)
+  assertFalse(p2 instanceof f3)
   assertTrue(p3 instanceof Object)
   assertTrue(p3 instanceof f0)
   assertFalse(p3 instanceof f1)
   assertTrue(p3 instanceof f2)
+  assertFalse(p3 instanceof f3)
+  assertTrue(o2 instanceof Object)
+  assertTrue(o2 instanceof f0)
+  assertFalse(o2 instanceof f1)
+  assertTrue(o2 instanceof f2)
+  assertFalse(o2 instanceof f3)
 
   var f = Proxy.createFunction({}, function() {})
   assertTrue(f instanceof Function)
 }
 
-TestInstanceof()
+TestProxyInstanceof()
+
+
+function TestInstanceofProxy() {
+  var o0 = Object.create(null)
+  var o1 = {}
+  var o2 = Object.create(o0)
+  var o3 = Object.create(o1)
+  var o4 = Object.create(o2)
+  var o5 = Object.create(o3)
+
+  function handler(o) { return {get: function() { return o } } }
+  var f0 = Proxy.createFunction(handler(o0), function() {})
+  var f1 = Proxy.createFunction(handler(o1), function() {})
+  var f2 = Proxy.createFunction(handler(o2), function() {})
+  var f3 = Proxy.createFunction(handler(o3), function() {})
+  var f4 = Proxy.createFunction(handler(o4), function() {})
+  var f5 = Proxy.createFunction(handler(o4), function() {})
+
+  assertFalse(null instanceof f0)
+  assertFalse(o0 instanceof f0)
+  assertFalse(o0 instanceof f1)
+  assertFalse(o0 instanceof f2)
+  assertFalse(o0 instanceof f3)
+  assertFalse(o0 instanceof f4)
+  assertFalse(o0 instanceof f5)
+  assertFalse(o1 instanceof f0)
+  assertFalse(o1 instanceof f1)
+  assertFalse(o1 instanceof f2)
+  assertFalse(o1 instanceof f3)
+  assertFalse(o1 instanceof f4)
+  assertFalse(o1 instanceof f5)
+  assertTrue(o2 instanceof f0)
+  assertFalse(o2 instanceof f1)
+  assertFalse(o2 instanceof f2)
+  assertFalse(o2 instanceof f3)
+  assertFalse(o2 instanceof f4)
+  assertFalse(o2 instanceof f5)
+  assertFalse(o3 instanceof f0)
+  assertTrue(o3 instanceof f1)
+  assertFalse(o3 instanceof f2)
+  assertFalse(o3 instanceof f3)
+  assertFalse(o3 instanceof f4)
+  assertFalse(o3 instanceof f5)
+  assertTrue(o4 instanceof f0)
+  assertFalse(o4 instanceof f1)
+  assertTrue(o4 instanceof f2)
+  assertFalse(o4 instanceof f3)
+  assertFalse(o4 instanceof f4)
+  assertFalse(o4 instanceof f5)
+  assertFalse(o5 instanceof f0)
+  assertTrue(o5 instanceof f1)
+  assertFalse(o5 instanceof f2)
+  assertTrue(o5 instanceof f3)
+  assertFalse(o5 instanceof f4)
+  assertFalse(o5 instanceof f5)
+
+  var f = Proxy.createFunction({}, function() {})
+  var ff = Proxy.createFunction(handler(Function), function() {})
+  assertTrue(f instanceof Function)
+  assertFalse(f instanceof ff)
+}
+
+TestInstanceofProxy()
 
 
 
 // Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
 
 function TestPrototype() {
-  var o = {}
+  var o1 = {}
   var p1 = Proxy.create({})
-  var p2 = Proxy.create({}, o)
+  var p2 = Proxy.create({}, o1)
   var p3 = Proxy.create({}, p2)
-  var p4 = Proxy.create({}, 666)
+  var p4 = Proxy.create({}, null)
+  var o2 = Object.create(p3)
 
-  assertSame(Object.getPrototypeOf(o), Object.prototype)
+  assertSame(Object.getPrototypeOf(o1), Object.prototype)
   assertSame(Object.getPrototypeOf(p1), null)
-  assertSame(Object.getPrototypeOf(p2), o)
+  assertSame(Object.getPrototypeOf(p2), o1)
   assertSame(Object.getPrototypeOf(p3), p2)
   assertSame(Object.getPrototypeOf(p4), null)
+  assertSame(Object.getPrototypeOf(o2), p3)
 
-  assertTrue(Object.prototype.isPrototypeOf(o))
+  assertTrue(Object.prototype.isPrototypeOf(o1))
   assertFalse(Object.prototype.isPrototypeOf(p1))
   assertTrue(Object.prototype.isPrototypeOf(p2))
   assertTrue(Object.prototype.isPrototypeOf(p3))
   assertFalse(Object.prototype.isPrototypeOf(p4))
-  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o))
+  assertTrue(Object.prototype.isPrototypeOf(o2))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o1))
   assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
   assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
   assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, o))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, p1))
-  assertTrue(Object.prototype.isPrototypeOf.call(o, p2))
-  assertTrue(Object.prototype.isPrototypeOf.call(o, p3))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, o1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, p1))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
-  assertFalse(Object.prototype.isPrototypeOf.call(p1, o))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o1))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
   assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(p2, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(p3, o2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, o1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, o2))
 
   var f = Proxy.createFunction({}, function() {})
   assertSame(Object.getPrototypeOf(f), Function.prototype)
@@ -1097,12 +1618,12 @@
 // Property names (Object.getOwnPropertyNames, Object.keys).
 
 function TestPropertyNames(names, handler) {
-  TestWithProxies(TestPropertyNames2, [names, handler])
+  TestWithProxies(TestPropertyNames2, handler, names)
 }
 
-function TestPropertyNames2(names_handler, create) {
-  var p = create(names_handler[1])
-  assertArrayEquals(names_handler[0], Object.getOwnPropertyNames(p))
+function TestPropertyNames2(create, handler, names) {
+  var p = create(handler)
+  assertArrayEquals(names, Object.getOwnPropertyNames(p))
 }
 
 TestPropertyNames([], {
@@ -1129,7 +1650,7 @@
   TestWithProxies(TestPropertyNamesThrow2, handler)
 }
 
-function TestPropertyNamesThrow2(handler, create) {
+function TestPropertyNamesThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.getOwnPropertyNames(p) }, "myexn")
 }
@@ -1145,12 +1666,12 @@
 
 
 function TestKeys(names, handler) {
-  TestWithProxies(TestKeys2, [names, handler])
+  TestWithProxies(TestKeys2, handler, names)
 }
 
-function TestKeys2(names_handler, create) {
-  var p = create(names_handler[1])
-  assertArrayEquals(names_handler[0], Object.keys(p))
+function TestKeys2(create, handler, names) {
+  var p = create(handler)
+  assertArrayEquals(names, Object.keys(p))
 }
 
 TestKeys([], {
@@ -1174,7 +1695,9 @@
 
 TestKeys(["a", "0"], {
   getOwnPropertyNames: function() { return ["a", 23, "zz", "", 0] },
-  getOwnPropertyDescriptor: function(k) { return {enumerable: k.length == 1} }
+  getOwnPropertyDescriptor: function(k) {
+    return k == "" ? undefined : {enumerable: k.length == 1}
+  }
 })
 
 TestKeys(["23", "zz", ""], {
@@ -1188,10 +1711,12 @@
 
 TestKeys(["a", "b", "c", "5"], {
   get getOwnPropertyNames() {
-    return function() { return ["0", 4, "a", "b", "c", 5] }
+    return function() { return ["0", 4, "a", "b", "c", 5, "ety"] }
   },
   get getOwnPropertyDescriptor() {
-    return function(k) { return {enumerable: k >= "44"} }
+    return function(k) {
+      return k == "ety" ? undefined : {enumerable: k >= "44"}
+    }
   }
 })
 
@@ -1207,7 +1732,7 @@
   TestWithProxies(TestKeysThrow2, handler)
 }
 
-function TestKeysThrow2(handler, create) {
+function TestKeysThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.keys(p) }, "myexn")
 }
@@ -1267,7 +1792,6 @@
 // Fixing (Object.freeze, Object.seal, Object.preventExtensions,
 //         Object.isFrozen, Object.isSealed, Object.isExtensible)
 
-// TODO(rossberg): use TestWithProxies to include funciton proxies
 function TestFix(names, handler) {
   var proto = {p: 77}
   var assertFixing = function(o, s, f, e) {
@@ -1314,19 +1838,27 @@
                     Object.keys(p3).sort())
   assertEquals(proto, Object.getPrototypeOf(p3))
   assertEquals(77, p3.p)
+
+  var p = Proxy.create(handler, proto)
+  var o = Object.create(p)
+  assertFixing(p, false, false, true)
+  assertFixing(o, false, false, true)
+  Object.freeze(o)
+  assertFixing(p, false, false, true)
+  assertFixing(o, true, true, false)
 }
 
 TestFix([], {
   fix: function() { return {} }
 })
 
-TestFix(["a", "b", "c", "d", "zz"], {
+TestFix(["a", "b", "c", "3", "zz"], {
   fix: function() {
     return {
       a: {value: "a", writable: true, configurable: false, enumerable: true},
       b: {value: 33, writable: false, configurable: false, enumerable: true},
       c: {value: 0, writable: true, configurable: true, enumerable: true},
-      d: {value: true, writable: false, configurable: true, enumerable: true},
+      '3': {value: true, writable: false, configurable: true, enumerable: true},
       zz: {value: 0, enumerable: false}
     }
   }
@@ -1377,8 +1909,8 @@
   TestWithProxies(TestFixThrow2, handler)
 }
 
-function TestFixThrow2(handler) {
-  var p = Proxy.create(handler, {})
+function TestFixThrow2(create, handler) {
+  var p = create(handler, {})
   assertThrows(function(){ Object.seal(p) }, "myexn")
   assertThrows(function(){ Object.freeze(p) }, "myexn")
   assertThrows(function(){ Object.preventExtensions(p) }, "myexn")
@@ -1404,6 +1936,135 @@
 })
 
 
+// Freeze a proxy in the middle of operations on it.
+// TODO(rossberg): actual behaviour not specified consistently at the moment,
+// just make sure that we do not crash.
+function TestReentrantFix(f) {
+  TestWithProxies(f, Object.freeze)
+  TestWithProxies(f, Object.seal)
+  TestWithProxies(f, Object.preventExtensions)
+}
+
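+// Each case below freezes the proxy from within a different trap, hitting a
+// different point of the property access or fixing protocol.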
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get get() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while getting get trap.
+  try { p.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get: function() { freeze(p); return 3 },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing get trap.
+  try { p.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing default get trap.
+  try { p.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return {get: function(){}} },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Freeze while getting a property from prototype.
+  try { o.x } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get set() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while getting set trap.
+  try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    set: function() { freeze(p); return true },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing set trap.
+  try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getOwnPropertyDescriptor: function() { freeze(p); return undefined },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing default set trap.
+  try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
+    fix: function() { return {} }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Freeze while setting a property in prototype, dropping the property!
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
+    fix: function() { return {x: {get: function(){}}} }
+  }
+  var p = create(handler)
+  var o = Object.create(p)
+  // Freeze while setting a property in prototype, making it read-only!
+  try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    get fix() { freeze(p); return function(){} }
+  }
+  var p = create(handler)
+  // Freeze while getting fix trap.
+  try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
+})
+
+TestReentrantFix(function(create, freeze) {
+  var handler = {
+    fix: function() { freeze(p); return {} }
+  }
+  var p = create(handler)
+  // Freeze while executing fix trap.
+  try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
+  p = create(handler)
+  try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
+})
+
+
 
 // String conversion (Object.prototype.toString,
 //                    Object.prototype.toLocaleString,
@@ -1426,6 +2087,13 @@
   assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
   assertEquals("toString", key)
   assertDoesNotThrow(function(){ Function.prototype.toString.call(f) })
+
+  var o = Object.create(p)
+  key = ""
+  assertEquals("[object Object]", Object.prototype.toString.call(o))
+  assertEquals("", key)
+  assertEquals("my_proxy", Object.prototype.toLocaleString.call(o))
+  assertEquals("toString", key)
 }
 
 TestToString({
@@ -1452,6 +2120,10 @@
   var f = Proxy.createFunction(handler, function() {})
   assertEquals("[object Function]", Object.prototype.toString.call(f))
   assertThrows(function(){ Object.prototype.toLocaleString.call(f) }, "myexn")
+
+  var o = Object.create(p)
+  assertEquals("[object Object]", Object.prototype.toString.call(o))
+  assertThrows(function(){ Object.prototype.toLocaleString.call(o) }, "myexn")
 }
 
 TestToStringThrow({
@@ -1485,7 +2157,7 @@
   TestWithProxies(TestValueOf2, handler)
 }
 
-function TestValueOf2(handler, create) {
+function TestValueOf2(create, handler) {
   var p = create(handler)
   assertSame(p, Object.prototype.valueOf.call(p))
 }
@@ -1502,7 +2174,7 @@
   TestWithProxies(TestIsEnumerable2, handler)
 }
 
-function TestIsEnumerable2(handler, create) {
+function TestIsEnumerable2(create, handler) {
   var p = create(handler)
   assertTrue(Object.prototype.propertyIsEnumerable.call(p, "a"))
   assertEquals("a", key)
@@ -1510,6 +2182,11 @@
   assertEquals("2", key)
   assertFalse(Object.prototype.propertyIsEnumerable.call(p, "z"))
   assertEquals("z", key)
+
+  var o = Object.create(p)
+  key = ""
+  assertFalse(Object.prototype.propertyIsEnumerable.call(o, "a"))
+  assertEquals("", key)  // trap not invoked
 }
 
 TestIsEnumerable({
@@ -1546,7 +2223,7 @@
   TestWithProxies(TestIsEnumerableThrow2, handler)
 }
 
-function TestIsEnumerableThrow2(handler, create) {
+function TestIsEnumerableThrow2(create, handler) {
   var p = create(handler)
   assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, "a") },
     "myexn")
@@ -1580,103 +2257,3 @@
     return function(k) { throw "myexn" }
   }
 }))
-
-
-
-// Calling (call, Function.prototype.call, Function.prototype.apply,
-//          Function.prototype.bind).
-
-var global = this
-var receiver
-
-function TestCall(isStrict, callTrap) {
-  assertEquals(42, callTrap(5, 37))
-// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
-// assertEquals(isStrict ? undefined : global, receiver)
-
-  var f = Proxy.createFunction({fix: function() { return {} }}, callTrap)
-  receiver = 333
-  assertEquals(42, f(11, 31))
-  assertEquals(isStrict ? undefined : global, receiver)
-  var o = {}
-  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
-  assertEquals(o, receiver)
-  assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
-  assertEquals(isStrict ? null : global, receiver)
-  assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
-  assertEquals(2, receiver.valueOf())
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
-  assertEquals(o, receiver)
-  var ff = Function.prototype.bind.call(f, o, 12)
-  receiver = 333
-  assertEquals(42, ff(30))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
-  assertEquals(o, receiver)
-
-  Object.freeze(f)
-  receiver = 333
-  assertEquals(42, f(11, 31))
-// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
-// assertEquals(isStrict ? undefined : global, receiver)
-  receiver = 333
-  assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(42, ff(30))
-  assertEquals(o, receiver)
-  receiver = 333
-  assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
-  assertEquals(o, receiver)
-}
-
-TestCall(false, function(x, y) {
-  receiver = this; return x + y
-})
-
-TestCall(true, function(x, y) {
-  "use strict";
-  receiver = this; return x + y
-})
-
-TestCall(false, Proxy.createFunction({}, function(x, y) {
-  receiver = this; return x + y
-}))
-
-TestCall(true, Proxy.createFunction({}, function(x, y) {
-  "use strict";
-  receiver = this; return x + y
-}))
-
-var p = Proxy.createFunction({fix: function() {return {}}}, function(x, y) {
-  receiver = this; return x + y
-})
-TestCall(false, p)
-Object.freeze(p)
-TestCall(false, p)
-
-
-function TestCallThrow(callTrap) {
-  var f = Proxy.createFunction({fix: function() {return {}}}, callTrap)
-  assertThrows(function(){ f(11) }, "myexn")
-  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
-  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-
-  Object.freeze(f)
-  assertThrows(function(){ f(11) }, "myexn")
-  assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
-  assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
-}
-
-TestCallThrow(function() { throw "myexn" })
-TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-
-var p = Proxy.createFunction(
-  {fix: function() {return {}}}, function() { throw "myexn" })
-Object.freeze(p)
-TestCallThrow(p)
diff --git a/test/mjsunit/harmony/weakmaps.js b/test/mjsunit/harmony/weakmaps.js
deleted file mode 100644
index 7b5dcaf..0000000
--- a/test/mjsunit/harmony/weakmaps.js
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-weakmaps --expose-gc
-
-
-// Test valid getter and setter calls
-var m = new WeakMap;
-assertDoesNotThrow(function () { m.get(new Object) });
-assertDoesNotThrow(function () { m.set(new Object) });
-assertDoesNotThrow(function () { m.has(new Object) });
-assertDoesNotThrow(function () { m.delete(new Object) });
-
-
-// Test invalid getter and setter calls
-var m = new WeakMap;
-assertThrows(function () { m.get(undefined) }, TypeError);
-assertThrows(function () { m.set(undefined, 0) }, TypeError);
-assertThrows(function () { m.get(0) }, TypeError);
-assertThrows(function () { m.set(0, 0) }, TypeError);
-assertThrows(function () { m.get('a-key') }, TypeError);
-assertThrows(function () { m.set('a-key', 0) }, TypeError);
-
-
-// Test expected mapping behavior
-var m = new WeakMap;
-function TestMapping(map, key, value) {
-  map.set(key, value);
-  assertSame(value, map.get(key));
-}
-TestMapping(m, new Object, 23);
-TestMapping(m, new Object, 'the-value');
-TestMapping(m, new Object, new Object);
-
-
-// Test expected querying behavior
-var m = new WeakMap;
-var key = new Object;
-TestMapping(m, key, 'to-be-present');
-assertTrue(m.has(key));
-assertFalse(m.has(new Object));
-TestMapping(m, key, undefined);
-assertFalse(m.has(key));
-assertFalse(m.has(new Object));
-
-
-// Test expected deletion behavior
-var m = new WeakMap;
-var key = new Object;
-TestMapping(m, key, 'to-be-deleted');
-assertTrue(m.delete(key));
-assertFalse(m.delete(key));
-assertFalse(m.delete(new Object));
-assertSame(m.get(key), undefined);
-
-
-// Test GC of map with entry
-var m = new WeakMap;
-var key = new Object;
-m.set(key, 'not-collected');
-gc();
-assertSame('not-collected', m.get(key));
-
-
-// Test GC of map with chained entries
-var m = new WeakMap;
-var head = new Object;
-for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
-  m.set(key, new Object);
-}
-gc();
-var count = 0;
-for (key = head; key != undefined; key = m.get(key)) {
-  count++;
-}
-assertEquals(11, count);
-
-
-// Test property attribute [[Enumerable]]
-var m = new WeakMap;
-function props(x) {
-  var array = [];
-  for (var p in x) array.push(p);
-  return array.sort();
-}
-assertArrayEquals([], props(WeakMap));
-assertArrayEquals([], props(WeakMap.prototype));
-assertArrayEquals([], props(m));
-
-
-// Test arbitrary properties on weak maps
-var m = new WeakMap;
-function TestProperty(map, property, value) {
-  map[property] = value;
-  assertEquals(value, map[property]);
-}
-for (i = 0; i < 20; i++) {
-  TestProperty(m, i, 'val' + i);
-  TestProperty(m, 'foo' + i, 'bar' + i);
-}
-TestMapping(m, new Object, 'foobar');
-
-
-// Test direct constructor call
-var m = WeakMap();
-assertTrue(m instanceof WeakMap);
-
-
-// Test some common JavaScript idioms
-var m = new WeakMap;
-assertTrue(m instanceof WeakMap);
-assertTrue(WeakMap.prototype.set instanceof Function)
-assertTrue(WeakMap.prototype.get instanceof Function)
-assertTrue(WeakMap.prototype.has instanceof Function)
-assertTrue(WeakMap.prototype.delete instanceof Function)
-
-
-// Regression test for WeakMap prototype.
-assertTrue(WeakMap.prototype.constructor === WeakMap)
-assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
-
-
-// Regression test for issue 1617: The prototype of the WeakMap constructor
-// needs to be unique (i.e. different from the one of the Object constructor).
-assertFalse(WeakMap.prototype === Object.prototype);
-var o = Object.create({});
-assertFalse("get" in o);
-assertFalse("set" in o);
-assertEquals(undefined, o.get);
-assertEquals(undefined, o.set);
-var o = Object.create({}, { myValue: {
-  value: 10,
-  enumerable: false,
-  configurable: true,
-  writable: true
-}});
-assertEquals(10, o.myValue);
-
-
-// Stress Test
-// There is a proposed stress-test available at the es-discuss mailing list
-// which cannot be reasonably automated.  Check it out by hand if you like:
-// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index faa5a43..6f6e323 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -223,7 +223,7 @@
   assertSame = function assertSame(expected, found, name_opt) {
     if (found === expected) {
       if (expected !== 0 || (1 / expected) == (1 / found)) return;
-    } else if (isNaN(expected) && isNaN(found)) {
+    } else if ((expected !== expected) && (found !== found)) {
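+      // x !== x holds only for NaN; unlike isNaN, this comparison does not
+      // coerce non-numeric values into accidentally matching.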
       return;
     }
     fail(PrettyPrint(expected), found, name_opt);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index bae09b4..cdac99b 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -34,9 +34,13 @@
 # Fails.
 regress/regress-1119: FAIL
 
-#############################################################################
-# Fails due to r10102 which reverts precise stepping on the 3.6 branch.
-debug-step-2: FAIL
+##############################################################################
+
+# Issue 1845: http://code.google.com/p/v8/issues/detail?id=1845
+harmony/proxies-example-membrane: SKIP
+
+# NewGC: BUG(1719) slow to collect arrays over several contexts.
+regress/regress-524: SKIP
 
 ##############################################################################
 # Too slow in debug mode with --stress-opt
@@ -64,7 +68,6 @@
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
 
-
 ##############################################################################
 [ $arch == arm ]
 
diff --git a/test/mjsunit/object-define-properties.js b/test/mjsunit/object-define-properties.js
index 128df69..6d5032e 100644
--- a/test/mjsunit/object-define-properties.js
+++ b/test/mjsunit/object-define-properties.js
@@ -54,3 +54,19 @@
 
 assertEquals(x.foo, 10);
 assertEquals(x.bar, 42);
+
+
+// Make sure that all property descriptors are calculated before any
+// modifications are done.
+
+var object = {};
+
+assertThrows(function() {
+    Object.defineProperties(object, {
+      foo: { value: 1 },
+      bar: { value: 2, get: function() { return 3; } }
+    });
+  }, TypeError);
+
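+// 'bar' mixes 'value' with 'get', which makes an invalid descriptor; since
+// all descriptors are converted before any property is defined, 'foo' must
+// not have been added either.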
+assertEquals(undefined, object.foo);
+assertEquals(undefined, object.bar);
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/optimized-typeof.js
similarity index 79%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/optimized-typeof.js
index 2502b53..b0c0725 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/optimized-typeof.js
@@ -25,22 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --allow-natives-syntax
 
-var e = new Error();
-assertEquals('Error', e + '');
+function typeofDirectly() {
+  return typeof({}) === "undefined";
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
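+// Run each function twice to gather type feedback, force optimization, then
+// run again so the optimized code path is the one being exercised.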
+typeofDirectly();
+typeofDirectly();
+%OptimizeFunctionOnNextCall(typeofDirectly);
+typeofDirectly();
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+function typeofViaVariable() {
+  var foo = typeof({})
+  return foo === "undefined";
+}
+
+typeofViaVariable();
+typeofViaVariable();
+%OptimizeFunctionOnNextCall(typeofViaVariable);
+typeofViaVariable();
diff --git a/test/mjsunit/regexp-static.js b/test/mjsunit/regexp-static.js
index 0f84968..8f283f6 100644
--- a/test/mjsunit/regexp-static.js
+++ b/test/mjsunit/regexp-static.js
@@ -25,18 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that we throw exceptions when calling test and exec with no
-// input.  This is not part of the spec, but we do it for
-// compatibility with JSC.
-assertThrows("/a/.test()");
-assertThrows("/a/.exec()");
-
-// Test that we do not throw exceptions once the static RegExp.input
-// field has been set.
-RegExp.input = "a";
-assertDoesNotThrow("/a/.test()");
-assertDoesNotThrow("/a/.exec()");
-
 // Test the (deprecated as of JS 1.5) properties of the RegExp function.
 var re = /((\d+)\.(\d+))/;
 var s = 'abc123.456def';
@@ -166,3 +154,8 @@
 var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
 assertTrue(/^([a-z]+): (.*)/.test(foo.substring(foo.indexOf("regexp:"))), "regexp: setup");
 assertEquals("regexp", RegExp.$1, "RegExp.$1");
+
+
+// Check that calling with no argument is the same as calling with undefined.
+assertTrue(/^undefined$/.test());
+assertEquals(["undefined"], /^undefined$/.exec());
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-100702.js
similarity index 79%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-100702.js
index 2502b53..46494ab 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-100702.js
@@ -25,22 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Regression test for correct handling of non-object receiver values
+// passed to built-in array functions.
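+// In non-strict mode such receivers are wrapped, so inside the method 'this'
+// is a String object and never the primitive string itself.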
 
-var e = new Error();
-assertEquals('Error', e + '');
+String.prototype.isThatMe = function () {
+  assertFalse(this === str);
+};
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+var str = "abc";
+str.isThatMe();
+str.isThatMe.call(str);
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var arr = [1];
+arr.forEach("".isThatMe, str);
+arr.filter("".isThatMe, str);
+arr.some("".isThatMe, str);
+arr.every("".isThatMe, str);
+arr.map("".isThatMe, str);
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-109195.js
similarity index 66%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-109195.js
index 2502b53..97538aa 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-109195.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,22 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --expose-debug-as debug
+var Debug = debug.Debug;
 
-var e = new Error();
-assertEquals('Error', e + '');
+function listener(event, exec_state, event_data, data) {
+  for (var i = 0, n = exec_state.frameCount(); i < n; i++) {
+    exec_state.frame().scopeCount(i);
+  }
+  exec_state.prepareStep(Debug.StepAction.Continue, 1);
+}
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+Debug.setListener(listener);
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var F = function () {
+  1, function () {
+    var d = 0;
+    (function () { d; });
+    debugger;
+  }();
+};
+
+var src = "(" + F.toString() + ")()";
+eval(src);
+
+Function.prototype.__defineGetter__("f", function () {
+  debugger;
+  return 0;
+});
+
+var G = function () {
+  1, function () {
+    var d = 0;
+    (function () { d; });
+    debugger;
+  }['f'];
+};
+
+var src = "(" + G.toString() + ")()";
+eval(src);
diff --git a/test/mjsunit/regress/regress-1110.js b/test/mjsunit/regress/regress-1110.js
index 43b8d77..124f520 100644
--- a/test/mjsunit/regress/regress-1110.js
+++ b/test/mjsunit/regress/regress-1110.js
@@ -28,10 +28,9 @@
 // Test that the illegal continue is thrown at parse time.
 
 try {
-  function Crash() { continue;if (Crash) {
-    } }
+  eval("function Crash() { assertUnreachable(); continue;if (Crash) {  } }");
   Crash();
-  assertTrue(false);
+  assertUnreachable();
 } catch (e) {
   assertTrue(e instanceof SyntaxError);
   assertTrue(/continue/.test(e.message));
diff --git a/test/mjsunit/regress/regress-1170.js b/test/mjsunit/regress/regress-1170.js
index 95684c5..66ed9f2 100644
--- a/test/mjsunit/regress/regress-1170.js
+++ b/test/mjsunit/regress/regress-1170.js
@@ -49,7 +49,7 @@
   exception = true;
   assertTrue(/TypeError/.test(e));
 }
-assertTrue(exception);
+assertFalse(exception);
 
 exception = false;
 try {
diff --git a/test/mjsunit/regress/regress-1213575.js b/test/mjsunit/regress/regress-1213575.js
index 9d82064..f3a11db 100644
--- a/test/mjsunit/regress/regress-1213575.js
+++ b/test/mjsunit/regress/regress-1213575.js
@@ -25,17 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Make sure that a const definition always
-// conflicts with a defined setter. This avoid
-// trying to pass 'the hole' to the setter.
+// Make sure that a const definition does not try
+// to pass 'the hole' to a defined setter.
 
-this.__defineSetter__('x', function(value) { assertTrue(false); });
+this.__defineSetter__('x', function(value) { assertTrue(value === 1); });
 
 var caught = false;
 try {
-  eval('const x');
+  eval('const x = 1');
 } catch(e) {
   assertTrue(e instanceof TypeError);
   caught = true;
 }
-assertTrue(caught);
+assertFalse(caught);
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1217.js
similarity index 68%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1217.js
index 2502b53..6530549 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1217.js
@@ -25,22 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Check that RegExp.prototype is itself a RegExp object.
 
-var e = new Error();
-assertEquals('Error', e + '');
+var proto = RegExp.prototype;
+assertEquals("[object RegExp]", Object.prototype.toString.call(proto));
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+assertEquals("", proto.source);
+assertEquals(false, proto.global);
+assertEquals(false, proto.multiline);
+assertEquals(false, proto.ignoreCase);
+assertEquals(0, proto.lastIndex);
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+assertEquals("/(?:)/", proto.toString());
+
+var execResult = proto.exec("argle");
+assertEquals(1, execResult.length);
+assertEquals("", execResult[0]);
+assertEquals("argle", execResult.input);
+assertEquals(0, execResult.index);
+
+assertTrue(proto.test("argle"));
+
+// We disallow re-compiling the RegExp.prototype object.
+assertThrows(function(){ proto.compile("something"); }, TypeError);
diff --git a/test/mjsunit/regress/regress-1229.js b/test/mjsunit/regress/regress-1229.js
index e16d278..c0dcba9 100644
--- a/test/mjsunit/regress/regress-1229.js
+++ b/test/mjsunit/regress/regress-1229.js
@@ -35,10 +35,10 @@
   assertEquals(3, z);
 }
 
-var bound_arg = [1];
+var foob = foo.bind({}, 1);
 
 function f(y, z) {
-  return %NewObjectFromBound(foo, bound_arg);
+  return %NewObjectFromBound(foob);
 }
 
 // Check that %NewObjectFromBound looks at correct frame for inlined function.
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1415.js
similarity index 73%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1415.js
index 2502b53..f993e9b 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1415.js
@@ -25,22 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Surrogate pair range.
+// U+D800
+assertThrows(function(){ decodeURIComponent("%ED%A0%80"); }, URIError);
+// U+DBFF
+assertThrows(function(){ decodeURIComponent("%ED%AF%BF"); }, URIError);
+// U+DC00
+assertThrows(function(){ decodeURIComponent("%ED%B0%80"); }, URIError);
+// U+DFFF
+assertThrows(function(){ decodeURIComponent("%ED%BF%BF"); }, URIError);
 
-var e = new Error();
-assertEquals('Error', e + '');
-
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+// Overlong encodings
+// U+007F in two bytes.
+assertThrows(function(){ decodeURIComponent("%C1%BF"); }, URIError);
+// U+07FF in three bytes.
+assertThrows(function(){ decodeURIComponent("%E0%9F%BF"); }, URIError);
diff --git a/test/mjsunit/regress/regress-1639-2.js b/test/mjsunit/regress/regress-1639-2.js
new file mode 100644
index 0000000..c439dd8
--- /dev/null
+++ b/test/mjsunit/regress/regress-1639-2.js
@@ -0,0 +1,93 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+}
+
+var state = 0;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+      print('event data: ' + event_data.toJSONProtocol());
+      print();
+      assertEquals('// BREAK', line.substr(-8),
+                   "should not break outside evaluate");
+
+      switch (state) {
+      case 0:
+        state = 1;
+        // While in the debugger, stepping through a set of instructions
+        // executed via the evaluate command must stop at the end of that
+        // set of instructions and must not continue into native debugger
+        // code.
+        sendCommand(exec_state, {
+          seq : 0,
+          type : "request",
+          command : "evaluate",
+          arguments : {
+            'expression' : 'print("A"); debugger; print("B"); // BREAK',
+            'global' : true
+          }
+        });
+        break;
+      case 1:
+        sendCommand(exec_state, {
+          seq : 0,
+          type : "request",
+          command : "continue",
+          arguments : {
+            stepaction : "next"
+          }
+        });
+        break;
+      }
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function a() {
+} // BREAK
+
+// Set a break point and call to invoke the debug event listener.
+Debug.setBreakPoint(a, 0, 0);
+a();
diff --git a/test/mjsunit/regress/regress-1692.js b/test/mjsunit/regress/regress-1692.js
new file mode 100644
index 0000000..06bd66c
--- /dev/null
+++ b/test/mjsunit/regress/regress-1692.js
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that Object.prototype.propertyIsEnumerable handles array indices
+// correctly.
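+// The prototype p below carries the inherited half of the properties and o
+// the own half; in each object the first of every pair is enumerable and
+// the second is not, covering data properties, accessors, and array indices.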
+
+var p = Object.create({}, {
+  a : { value : 42, enumerable : true },
+  b : { value : 42, enumerable : false },
+  1 : { value : 42, enumerable : true },
+  2 : { value : 42, enumerable : false },
+  f : { get: function(){}, enumerable: true },
+  g : { get: function(){}, enumerable: false },
+  11 : { get: function(){}, enumerable: true },
+  12 : { get: function(){}, enumerable: false }
+});
+var o = Object.create(p, {
+  c : { value : 42, enumerable : true },
+  d : { value : 42, enumerable : false },
+  3 : { value : 42, enumerable : true },
+  4 : { value : 42, enumerable : false },
+  h : { get: function(){}, enumerable: true },
+  k : { get: function(){}, enumerable: false },
+  13 : { get: function(){}, enumerable: true },
+  14 : { get: function(){}, enumerable: false }
+});
+
+// Inherited properties are ignored.
+assertFalse(o.propertyIsEnumerable("a"));
+assertFalse(o.propertyIsEnumerable("b"));
+assertFalse(o.propertyIsEnumerable("1"));
+assertFalse(o.propertyIsEnumerable("2"));
+
+// Own properties.
+assertTrue(o.propertyIsEnumerable("c"));
+assertFalse(o.propertyIsEnumerable("d"));
+assertTrue(o.propertyIsEnumerable("3"));
+assertFalse(o.propertyIsEnumerable("4"));
+
+// Inherited accessors.
+assertFalse(o.propertyIsEnumerable("f"));
+assertFalse(o.propertyIsEnumerable("g"));
+assertFalse(o.propertyIsEnumerable("11"));
+assertFalse(o.propertyIsEnumerable("12"));
+
+// Own accessors.
+assertTrue(o.propertyIsEnumerable("h"));
+assertFalse(o.propertyIsEnumerable("k"));
+assertTrue(o.propertyIsEnumerable("13"));
+assertFalse(o.propertyIsEnumerable("14"));
+
+// Nonexisting properties.
+assertFalse(o.propertyIsEnumerable("xxx"));
+assertFalse(o.propertyIsEnumerable("999"));
+
+// String object properties.
+var o = Object("string");
+// Non-string property on String object.
+o[10] = 42;
+assertTrue(o.propertyIsEnumerable(10));
+assertFalse(o.propertyIsEnumerable(0));
+
+// Fast elements.
+var o = [1,2,3,4,5];
+assertTrue(o.propertyIsEnumerable(3));
diff --git a/test/mjsunit/regress/regress-1708.js b/test/mjsunit/regress/regress-1708.js
new file mode 100644
index 0000000..ab50e07
--- /dev/null
+++ b/test/mjsunit/regress/regress-1708.js
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test of a very rare corner case where left-trimming an
+// array caused invalid marking bit patterns on lazily swept pages.
+
+// Flags: --expose-gc --noincremental-marking --max-new-space-size 1000
+
+(function() {
+  var head = new Array(1);
+  var tail = head;
+
+  // Fill heap to increase old-space size and trigger lazy sweeping on
+  // some of the old-space pages.
+  for (var i = 0; i < 200; i++) {
+    tail[1] = new Array(1000);
+    tail = tail[1];
+  }
+  array = new Array(100);
+  gc(); gc();
+
+  // At this point "array" should have been promoted to old-space and be
+// located in a lazily swept page with intact marking bits. Now shift
+  // the array to trigger left-trimming operations.
+  assertEquals(100, array.length);
+  for (var i = 0; i < 50; i++) {
+    array.shift();
+  }
+  assertEquals(50, array.length);
+
+  // At this point "array" should have been trimmed from the left with
+// marking bits being correctly transferred to the new object start.
+  // Scavenging operations cause lazy sweeping to advance and verify
+  // that marking bit patterns are still sane.
+  for (var i = 0; i < 200; i++) {
+    tail[1] = new Array(1000);
+    tail = tail[1];
+  }
+})();
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1711.js
similarity index 80%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1711.js
index 2502b53..15591b1 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1711.js
@@ -25,22 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// string.split needs to evaluate the separator's toString even if limit
+// is 0 because toString may have side effects.
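+// (In ES5 15.5.4.14, ToString(separator) is applied in step 8, before the
+// lim == 0 early return in step 9.)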
 
-var e = new Error();
-assertEquals('Error', e + '');
-
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var side_effect = false;
+var separator = new Object();
+separator.toString = function() {
+  side_effect = true;
+  return undefined;
+}
+'subject'.split(separator, 0);
+assertTrue(side_effect);
diff --git a/test/mjsunit/regress/regress-1713.js b/test/mjsunit/regress/regress-1713.js
new file mode 100644
index 0000000..0af1144
--- /dev/null
+++ b/test/mjsunit/regress/regress-1713.js
@@ -0,0 +1,127 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-compact --expose-gc
+
+var O = { get f() { return 0; } };
+
+var CODE = [];
+
+var R = [];
+
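+// Rough sizing assumption: each new Array(1024) holds 1024 element slots,
+// about 4Kb of payload on a 32-bit heap, so Allocate4Kb(N) allocates ~4*N Kb.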
+function Allocate4Kb(N) {
+  var arr = [];
+  do {arr.push(new Array(1024));} while (--N > 0);
+  return arr;
+}
+
+function AllocateXMb(X) {
+  return Allocate4Kb((1024 * X) / 4);
+}
+
+function Node(v, next) { this.v = v; this.next = next; }
+
+Node.prototype.execute = function (O) {
+  var n = this;
+  while (n.next !== null) n = n.next;
+  n.v(O);
+};
+
+function LongList(N, x) {
+  if (N == 0) return new Node(x, null);
+  return new Node(new Array(1024), LongList(N - 1, x));
+}
+
+var L = LongList(1024, function (O) {
+  for (var i = 0; i < 5; i++) O.f;
+});
+
+
+
+function Incremental(O, x) {
+  if (!x) {
+    return;
+  }
+  function CreateCode(i) {
+    var f = new Function("return O.f_" + i);
+    CODE.push(f);
+    f(); // compile
+    f(); // compile
+    f(); // compile
+  }
+
+  for (var i = 0; i < 1e4; i++) CreateCode(i);
+  gc();
+  gc();
+  gc();
+
+  print(">>> 1 <<<");
+
+  L.execute(O);
+
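+  // An empty try/catch keeps Crankshaft from optimizing (and inlining)
+  // this function.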
+  try {} catch (e) {}
+
+  L = null;
+  print(">>> 2 <<<");
+  AllocateXMb(8);
+  // print("1");
+  // AllocateXMb(8);
+  // print("1");
+  // AllocateXMb(8);
+
+}
+
+function foo(O, x) {
+  Incremental(O, x);
+
+  print('f');
+
+  for (var i = 0; i < 5; i++) O.f;
+
+
+  print('g');
+
+  bar(x);
+}
+
+function bar(x) {
+  if (!x) return;
+  %DeoptimizeFunction(foo);
+  AllocateXMb(8);
+  AllocateXMb(8);
+}
+
+var O1 = {};
+var O2 = {};
+var O3 = {};
+var O4 = {f:0};
+
+foo(O1, false);
+foo(O2, false);
+foo(O3, false);
+%OptimizeFunctionOnNextCall(foo);
+foo(O4, true);
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-1748.js
similarity index 80%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-1748.js
index 2502b53..e287e55 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-1748.js
@@ -25,22 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Test that /^/ only matches at beginning of string.
+// A bug in the x64 implementation caused it to match when executing the
+// RegExp on a part of a string that starts at a multiple of 256.
 
-var e = new Error();
-assertEquals('Error', e + '');
-
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var str = Array(10000).join("X");
+str.replace(/^|X/g, function(m, i, s) {
+  if (i > 0) assertEquals("X", m, "at position 0x" + i.toString(16));
+});
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1757.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1757.js
index d3f2e35..f7a5516 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1757.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --string-slices --expose-externalize-string
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var a = "abcdefghijklmnopqrstuvqxy"+"z";
+externalizeString(a, true);
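+// Reading through a slice of the now-external two-byte string must still
+// yield the original characters.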
+assertEquals('b', a.substring(1).charAt(0));
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-1945.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/regress-1945.js
index d3f2e35..bffc775 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-1945.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --allow-natives-syntax
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+var _d = new Date();
+_d.setHours(0,0,0,0);
+_d.setHours(0,0,0,0);
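+// The two unoptimized calls above collect type feedback before setHours is
+// forced through the optimizing compiler.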
+%OptimizeFunctionOnNextCall(_d.setHours);
+_d.setHours(0,0,0,0);
diff --git a/test/mjsunit/regress/regress-877615.js b/test/mjsunit/regress/regress-877615.js
index d35aba6..bec5a4d 100644
--- a/test/mjsunit/regress/regress-877615.js
+++ b/test/mjsunit/regress/regress-877615.js
@@ -25,13 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-Number.prototype.toLocaleString = function() { return 'invalid'};
-assertEquals([1].toLocaleString(), 'invalid');  // invalid
+Number.prototype.toLocaleString = function() { return 'invalid'; };
+assertEquals('invalid', [1].toLocaleString());  // invalid
 
 Number.prototype.toLocaleString = 'invalid';
-assertEquals([1].toLocaleString(), '1');  // 1
+assertThrows(function() { [1].toLocaleString(); });  // Not callable.
 
+delete Number.prototype.toLocaleString;
 Number.prototype.toString = function() { return 'invalid' };
-assertEquals([1].toLocaleString(), '1');  // 1
-assertEquals([1].toString(), '1');        // 1
-
+assertEquals('invalid', [1].toLocaleString());  // Uses ToObject on elements.
+assertEquals('1', [1].toString());              // Uses ToString directly on elements.
diff --git a/test/mjsunit/regress/regress-91517.js b/test/mjsunit/regress/regress-91517.js
deleted file mode 100644
index 68a768c..0000000
--- a/test/mjsunit/regress/regress-91517.js
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Getting property names of an object with a prototype chain that
-// triggers dictionary elements in GetLocalPropertyNames() shouldn't
-// crash the runtime
-
-// Flags: --allow-natives-syntax
-
-function Object1() {
-  this.foo = 1;
-}
-
-function Object2() {
-  this.fuz = 2;
-  this.objects = new Object();
-  this.fuz1 = 2;
-  this.fuz2 = 2;
-  this.fuz3 = 2;
-  this.fuz4 = 2;
-  this.fuz5 = 2;
-  this.fuz6 = 2;
-  this.fuz7 = 2;
-  this.fuz8 = 2;
-  this.fuz9 = 2;
-  this.fuz10 = 2;
-  this.fuz11 = 2;
-  this.fuz12 = 2;
-  this.fuz13 = 2;
-  this.fuz14 = 2;
-  this.fuz15 = 2;
-  this.fuz16 = 2;
-  this.fuz17 = 2;
-  // Force dictionary-based properties
-  for (x=1;x<1000;x++) {
-    this["sdf" + x] = 2;
-  }
-}
-
-function Object3() {
-  this.boo = 3;
-}
-
-function Object4() {
-  this.baz = 4;
-}
-
-obj1 = new Object1();
-obj2 = new Object2();
-obj3 = new Object3();
-obj4 = new Object4();
-
-%SetHiddenPrototype(obj4, obj3);
-%SetHiddenPrototype(obj3, obj2);
-%SetHiddenPrototype(obj2, obj1);
-
-function contains(a, obj) {
-  for(var i = 0; i < a.length; i++) {
-    if(a[i] === obj){
-      return true;
-    }
-  }
-  return false;
-}
-names = %GetLocalPropertyNames(obj4);
-assertEquals(1021, names.length);
-assertTrue(contains(names, "baz"));
-assertTrue(contains(names, "boo"));
-assertTrue(contains(names, "foo"));
-assertTrue(contains(names, "fuz"));
-assertTrue(contains(names, "fuz1"));
-assertTrue(contains(names, "fuz2"));
-assertTrue(contains(names, "fuz3"));
-assertTrue(contains(names, "fuz4"));
-assertTrue(contains(names, "fuz5"));
-assertTrue(contains(names, "fuz6"));
-assertTrue(contains(names, "fuz7"));
-assertTrue(contains(names, "fuz8"));
-assertTrue(contains(names, "fuz9"));
-assertTrue(contains(names, "fuz10"));
-assertTrue(contains(names, "fuz11"));
-assertTrue(contains(names, "fuz12"));
-assertTrue(contains(names, "fuz13"));
-assertTrue(contains(names, "fuz14"));
-assertTrue(contains(names, "fuz15"));
-assertTrue(contains(names, "fuz16"));
-assertTrue(contains(names, "fuz17"));
-assertFalse(names[1020] == undefined);
diff --git a/test/mjsunit/regress/regress-94873.js b/test/mjsunit/regress/regress-94873.js
new file mode 100644
index 0000000..41ca992
--- /dev/null
+++ b/test/mjsunit/regress/regress-94873.js
@@ -0,0 +1,78 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+  return JSON.parse(response);
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+
+      var frame = sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "frame"
+      });
+
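+      // additional_context binds the name "obj" to the receiver handle
+      // captured by the frame request above for this evaluation.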
+      sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "evaluate",
+        arguments: {
+          expression: "obj.x.toString()",
+          additional_context: [{
+            name: "obj",
+            handle: frame.body.receiver.ref
+          }]
+        }
+      });
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+Debug.setListener(listener);
+
+function a(x, y) {
+  this.x = x;
+  this.y = y;
+}
+
+Debug.setBreakPoint(a, 0, 0);
+new a(1, 2);
\ No newline at end of file
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-98773.js
similarity index 80%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-98773.js
index 2502b53..eb24eb5 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-98773.js
@@ -25,22 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Calling Array.sort on an external array is not supposed to crash.
 
-var e = new Error();
-assertEquals('Error', e + '');
+var array = new Int16Array(23);
+array[7] = 7; array[9] = 9;
+assertEquals(23, array.length);
+assertEquals(7, array[7]);
+assertEquals(9, array[9]);
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
-
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+Array.prototype.sort.call(array);
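+// The default comparator sorts by string value, so the 21 zero elements
+// come first and 7 and 9 end up at the tail.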
+assertEquals(23, array.length);
+assertEquals(7, array[21]);
+assertEquals(9, array[22]);
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/regress-99167.js
similarity index 85%
rename from test/mjsunit/regress/regress-221.js
rename to test/mjsunit/regress/regress-99167.js
index d3f2e35..5053ae5 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/regress-99167.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+// Flags: --expose-gc --max-new-space-size=1024
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+eval("function Node() { this.a = 1; this.a = 3; }");
+new Node;
+for (var i = 0; i < 4; ++i) gc();
+for (var i = 0; i < 100000; ++i) new Node;
diff --git a/test/mjsunit/cyclic-error-to-string.js b/test/mjsunit/regress/regress-crbug-107996.js
similarity index 64%
copy from test/mjsunit/cyclic-error-to-string.js
copy to test/mjsunit/regress/regress-crbug-107996.js
index 2502b53..dfe07e5 100644
--- a/test/mjsunit/cyclic-error-to-string.js
+++ b/test/mjsunit/regress/regress-crbug-107996.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,22 +25,40 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test printing of cyclic errors which return the empty string for
-// compatibility with Safari and Firefox.
+// Flags: --expose-debug-as debug
 
-var e = new Error();
-assertEquals('Error', e + '');
+Debug = debug.Debug;
 
-e = new Error();
-e.name = e;
-e.message = e;
-e.stack = e;
-e.arguments = e;
-assertEquals(': ', e + '');
+Debug.setListener(listener);
 
-e = new Error();
-e.name = [ e ];
-e.message = [ e ];
-e.stack = [ e ];
-e.arguments = [ e ];
-assertEquals(': ', e + '');
+var fourteen;
+var four_in_debugger = [];
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
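+    // "four" is a local of f1 but, via the closure chain, it is also in
+    // scope inside f2 and f3; the asserts at the end check the three
+    // function frames.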
+    for (var i = 0; i < exec_state.frameCount(); i++) {
+      var frame = exec_state.frame(i);
+      four_in_debugger[i] = frame.evaluate("four", false).value();
+    }
+  }
+}
+
+function f1() {
+  var three = 3;
+  var four = 4;
+  (function f2() {
+     var seven = 7;
+     (function f3() {
+        debugger;
+        fourteen = three + four + seven;
+     })();
+  })();
+}
+
+f1();
+assertEquals(14, fourteen);
+assertEquals(4, four_in_debugger[0]);
+assertEquals(4, four_in_debugger[1]);
+assertEquals(4, four_in_debugger[2]);
+
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-deopt-gc.js b/test/mjsunit/regress/regress-deopt-gc.js
index 7b7c29a..a74e2c5 100644
--- a/test/mjsunit/regress/regress-deopt-gc.js
+++ b/test/mjsunit/regress/regress-deopt-gc.js
@@ -42,7 +42,7 @@
   // Make sure we don't inline this function
   try { var a = 42; } catch(o) {};
   %DeoptimizeFunction(opt_me);
-  gc(true);
+  gc();
 }
 
 
diff --git a/test/mjsunit/regress/regress-221.js b/test/mjsunit/regress/short-circuit.js
similarity index 85%
copy from test/mjsunit/regress/regress-221.js
copy to test/mjsunit/regress/short-circuit.js
index d3f2e35..25363d6 100644
--- a/test/mjsunit/regress/regress-221.js
+++ b/test/mjsunit/regress/short-circuit.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that direct eval calls handle the case where eval has been
-// deleted correctly.
+var arr = [];
 
-// See http://code.google.com/p/v8/issues/detail?id=221
-
-assertThrows('eval(delete eval)');
-
+for (var i = 0; i < 28000; i++) {
+  arr.push(new RegExp("prefix" + i.toString() + i.toString() + i.toString()));
+}
diff --git a/test/mjsunit/stack-traces-2.js b/test/mjsunit/stack-traces-2.js
new file mode 100644
index 0000000..165c4df
--- /dev/null
+++ b/test/mjsunit/stack-traces-2.js
@@ -0,0 +1,87 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --builtins-in-stack-traces
+
+
+// Poisonous object that throws a reference error when conversion to a
+// primitive value is attempted.
+var thrower = { valueOf: function() { FAIL; },
+                toString: function() { FAIL; } };
+
+// Tests that a native constructor function is included in the
+// stack trace.
+function testTraceNativeConstructor(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    new nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+// Tests that a native conversion function is included in the
+// stack trace.
+function testTraceNativeConversion(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+
+function testNotOmittedBuiltin(throwing, included) {
+  try {
+    throwing();
+    assertUnreachable(included);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(included) >= 0, included);
+  }
+}
+
+
+testTraceNativeConversion(String);  // Does ToString on argument.
+testTraceNativeConversion(Number);  // Does ToNumber on argument.
+testTraceNativeConversion(RegExp);  // Does ToString on argument.
+
+testTraceNativeConstructor(String);  // Does ToString on argument.
+testTraceNativeConstructor(Number);  // Does ToNumber on argument.
+testTraceNativeConstructor(RegExp);  // Does ToString on argument.
+testTraceNativeConstructor(Date);    // Does ToNumber on argument.
+
+// QuickSort has the builtins object as receiver and is a non-native
+// builtin, so it should not be omitted when --builtins-in-stack-traces is set.
+testNotOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
+                                                     (b < a) - (a < b); });
+                      }, "QuickSort");
+
+// With the flag, even the non-native builtin ADD from runtime.js is not omitted.
+testNotOmittedBuiltin(function(){ thrower + 2; }, "ADD");
\ No newline at end of file
diff --git a/test/mjsunit/stack-traces.js b/test/mjsunit/stack-traces.js
index 47a5cc5..536e71b 100644
--- a/test/mjsunit/stack-traces.js
+++ b/test/mjsunit/stack-traces.js
@@ -194,6 +194,46 @@
 }
 
 
+// Poisonous object that throws a reference error when conversion to a
+// primitive value is attempted.
+var thrower = { valueOf: function() { FAIL; },
+                toString: function() { FAIL; } };
+
+// Tests that a native constructor function is included in the
+// stack trace.
+function testTraceNativeConstructor(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    new nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+// Tests that a native conversion function is included in the
+// stack trace.
+function testTraceNativeConversion(nativeFunc) {
+  var nativeFuncName = nativeFunc.name;
+  try {
+    nativeFunc(thrower);
+    assertUnreachable(nativeFuncName);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+  }
+}
+
+
+function testOmittedBuiltin(throwing, omitted) {
+  try {
+    throwing();
+    assertUnreachable(omitted);
+  } catch (e) {
+    assertTrue(e.stack.indexOf(omitted) < 0, omitted);
+  }
+}
+
+
 testTrace("testArrayNative", testArrayNative, ["Array.map (native)"]);
 testTrace("testNested", testNested, ["at one", "at two", "at three"]);
 testTrace("testMethodNameInference", testMethodNameInference, ["at Foo.bar"]);
@@ -217,3 +257,21 @@
 testCallerCensorship();
 testUnintendedCallerCensorship();
 testErrorsDuringFormatting();
+
+testTraceNativeConversion(String);  // Does ToString on argument.
+testTraceNativeConversion(Number);  // Does ToNumber on argument.
+testTraceNativeConversion(RegExp);  // Does ToString on argument.
+
+testTraceNativeConstructor(String);  // Does ToString on argument.
+testTraceNativeConstructor(Number);  // Does ToNumber on argument.
+testTraceNativeConstructor(RegExp);  // Does ToString on argument.
+testTraceNativeConstructor(Date);    // Does ToNumber on argument.
+
+// Omitted because QuickSort has the builtins object as receiver and is a
+// non-native builtin.
+testOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
+                                                     (b < a) - (a < b); });
+                   }, "QuickSort");
+
+// Omitted because ADD from runtime.js is a non-native builtin.
+testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
\ No newline at end of file
diff --git a/test/mjsunit/strict-mode-implicit-receiver.js b/test/mjsunit/strict-mode-implicit-receiver.js
index 338f6d1..8284edd 100644
--- a/test/mjsunit/strict-mode-implicit-receiver.js
+++ b/test/mjsunit/strict-mode-implicit-receiver.js
@@ -168,12 +168,7 @@
 outer_eval_conversion3(strict_eval, 'undefined');
 outer_eval_conversion3(non_strict_eval, 'object');
 
-// TODO(ager): I'm not sure this is in accordance with the spec. At
-// the moment, any call to eval where eval is not bound in the global
-// context is treated as an indirect call to eval which means that the
-// global context is used and the global object is passed as the
-// receiver.
-outer_eval_conversion3(eval, 'object');
+outer_eval_conversion3(eval, 'undefined');
 
 function test_constant_function() {
   var o = { f: function() { "use strict"; return this; } };
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index 30234ba..9c9bdfd 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -1051,14 +1051,20 @@
   }
   assertThrows(function() { strict.caller; }, TypeError);
   assertThrows(function() { strict.arguments; }, TypeError);
+  assertThrows(function() { strict.caller = 42; }, TypeError);
+  assertThrows(function() { strict.arguments = 42; }, TypeError);
 
   var another = new Function("'use strict'");
   assertThrows(function() { another.caller; }, TypeError);
   assertThrows(function() { another.arguments; }, TypeError);
+  assertThrows(function() { another.caller = 42; }, TypeError);
+  assertThrows(function() { another.arguments = 42; }, TypeError);
 
   var third = (function() { "use strict"; return function() {}; })();
   assertThrows(function() { third.caller; }, TypeError);
   assertThrows(function() { third.arguments; }, TypeError);
+  assertThrows(function() { third.caller = 42; }, TypeError);
+  assertThrows(function() { third.arguments = 42; }, TypeError);
 
   CheckPillDescriptor(strict, "caller");
   CheckPillDescriptor(strict, "arguments");
diff --git a/test/mjsunit/string-external-cached.js b/test/mjsunit/string-external-cached.js
new file mode 100644
index 0000000..12312ac
--- /dev/null
+++ b/test/mjsunit/string-external-cached.js
@@ -0,0 +1,94 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-externalize-string --expose-gc
+// Test data pointer caching of external strings.
+
+function test() {
+  // Test string.charAt.
+  var charat_str = new Array(5);
+  charat_str[0] = "0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\
+0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF";
+  charat_str[1] = "0123456789ABCDEF";
+  for (var i = 0; i < 6; i++) charat_str[1] += charat_str[1];
+  try {  // String can only be externalized once
+    externalizeString(charat_str[0], false);
+    externalizeString(charat_str[1], true);
+  } catch (ex) { }
+  charat_str[2] = charat_str[0].slice(0, -1);
+  charat_str[3] = charat_str[1].slice(0, -1);
+  charat_str[4] = charat_str[0] + charat_str[0];
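+  // Slices and cons strings built over the externalized parents exercise
+  // the cached data pointer through different string representations.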
+
+  for (var i = 0; i < 5; i++) {
+    assertEquals('B', charat_str[i].charAt(6*16 + 11));
+    assertEquals('C', charat_str[i].charAt(6*16 + 12));
+    assertEquals('A', charat_str[i].charAt(3*16 + 10));
+    assertEquals('B', charat_str[i].charAt(3*16 + 11));
+  }
+
+  charat_short = "012";
+  try {  // String can only be externalized once
+    externalizeString(charat_short, true);
+  } catch (ex) { }
+  assertEquals("1", charat_short.charAt(1));
+
+  // Test regexp.
+  var re = /(A|B)/;
+  var rere = /(T.{1,2}B)/;
+  var ascii = "ABCDEFGHIJKLMNOPQRST";
+  var twobyte = "_ABCDEFGHIJKLMNOPQRST";
+  try {
+    externalizeString(ascii, false);
+    externalizeString(twobyte, true);
+  } catch (ex) { }
+  assertTrue(isAsciiString(ascii));
+  assertFalse(isAsciiString(twobyte));
+  var ascii_slice = ascii.slice(1,-1);
+  var twobyte_slice = twobyte.slice(2,-1);
+  var ascii_cons = ascii + ascii;
+  var twobyte_cons = twobyte + twobyte;
+  for (var i = 0; i < 2; i++) {
+    assertEquals(["A", "A"], re.exec(ascii));
+    assertEquals(["B", "B"], re.exec(ascii_slice));
+    assertEquals(["TAB", "TAB"], rere.exec(ascii_cons));
+    assertEquals(["A", "A"], re.exec(twobyte));
+    assertEquals(["B", "B"], re.exec(twobyte_slice));
+    assertEquals(["T_AB", "T_AB"], rere.exec(twobyte_cons));
+  }
+}
+
+// Run the test many times to ensure ICs don't break things.
+for (var i = 0; i < 10; i++) {
+  test();
+}
+
+// Clean up string to make Valgrind happy.
+gc();
+gc();
diff --git a/test/mjsunit/string-externalize.js b/test/mjsunit/string-externalize.js
index da89786..d52a7e2 100644
--- a/test/mjsunit/string-externalize.js
+++ b/test/mjsunit/string-externalize.js
@@ -44,7 +44,7 @@
   assertFalse(isAsciiString(twoByteExternalWithAsciiData));
 
   var realTwoByteExternalString =
-      "\u1234\u1234" + (function() { return "\u1234"; })();
+      "\u1234\u1234\u1234\u1234" + (function() { return "\u1234"; })();
   externalizeString(realTwoByteExternalString);
   assertFalse(isAsciiString(realTwoByteExternalString));
 
diff --git a/test/mjsunit/string-slices-regexp.js b/test/mjsunit/string-slices-regexp.js
index a8cadae..98b8ef9 100644
--- a/test/mjsunit/string-slices-regexp.js
+++ b/test/mjsunit/string-slices-regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -24,11 +24,6 @@
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Flags: --string-slices
-
-//assertEquals('345"12345 6"1234567"123',
-//             '12345""12345 6""1234567""1234'.slice(2,-1).replace(/""/g, '"'));
 
 var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
 for(var i = 0; i < 1000; i++) {
diff --git a/test/mjsunit/string-slices.js b/test/mjsunit/string-slices.js
index 8cc1f81..3eb30f1 100755
--- a/test/mjsunit/string-slices.js
+++ b/test/mjsunit/string-slices.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --string-slices --expose-externalize-string
+// Flags: --expose-externalize-string --allow-natives-syntax
 
 var s = 'abcdefghijklmn';
 assertEquals(s, s.substr());
@@ -100,14 +100,7 @@
 
 // Keep creating strings to force allocation failure on substring creation.
 var x = "0123456789ABCDEF";
-x += x;  // 2^5
-x += x;
-x += x;
-x += x;
-x += x;
-x += x;  // 2^10
-x += x;
-x += x;
+for (var i = 0; i < 8; i++) x += x;
 var xl = x.length;
 var cache = [];
 for (var i = 0; i < 1000; i++) {
@@ -119,14 +112,7 @@
 
 // Same with two-byte strings
 var x = "\u2028123456789ABCDEF";
-x += x;  // 2^5
-x += x;
-x += x;
-x += x;
-x += x;
-x += x;  // 2^10
-x += x;
-x += x;
+for (var i = 0; i < 8; i++) x += x;
 var xl = x.length;
 var cache = [];
 for (var i = 0; i < 1000; i++) {
@@ -189,11 +175,34 @@
 assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
     utf.substring(5,1) + utf.substring(3,7));
 
-/*
 // Externalizing strings.
-var a = "123456789qwertyuiopasdfghjklzxcvbnm";
-var b = a.slice(1,-1);
+var a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
+var b = "23456789qwertyuiopasdfghjklzxcvbn"
 assertEquals(a.slice(1,-1), b);
-externalizeString(a);
+
+assertTrue(isAsciiString(a));
+externalizeString(a, true);
+assertFalse(isAsciiString(a));
+
 assertEquals(a.slice(1,-1), b);
-*/
+assertTrue(/3456789qwe/.test(a));
+assertEquals(5, a.indexOf("678"));
+assertEquals("12345", a.split("6")[0]);
+
+// Create a slice with an external string as parent string.
+var c = a.slice(1,-1);
+
+function test_crankshaft() {
+  for (var i = 0; i < 20; i++) {
+    assertEquals(b.charAt(i), a.charAt(i + 1));
+    assertEquals(b.charAt(i), c.charAt(i));
+    assertEquals(b.charAt(4), c.charAt(4));
+    assertTrue(/3456789qwe/.test(c));
+    assertEquals(4, c.indexOf("678"));
+    assertEquals("2345", c.split("6")[0]);
+  }
+}
+
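+// Run once unoptimized to collect type feedback, then force optimization and
+// run again so the Crankshaft code path sees the external parent string.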
+test_crankshaft();
+%OptimizeFunctionOnNextCall(test_crankshaft);
+test_crankshaft();
\ No newline at end of file
diff --git a/test/mjsunit/switch.js b/test/mjsunit/switch.js
index 180f994..6a61fe5 100644
--- a/test/mjsunit/switch.js
+++ b/test/mjsunit/switch.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 function f0() {
   switch (0) {
     // switch deliberately left empty
@@ -126,6 +128,42 @@
 assertEquals(3, f4(2), "fallthrough-switch.2");
 assertEquals(5, f4(3), "fallthrough-switch.3");
 
+function f4_string(tag, x) {
+  switch(tag) {
+    case 'zero':
+      x++;
+    case 'two':
+      x++;
+  }
+  return x;
+}
+
+// Symbols
+assertEquals(2, f4_string('zero', 0), "fallthrough-string-switch.0");
+assertEquals(1, f4_string('one', 1), "fallthrough-string-switch.1");
+assertEquals(3, f4_string('two', 2), "fallthrough-string-switch.2");
+
+// Strings
+assertEquals(2, f4_string('_zero'.slice(1), 0), "fallthrough-string-switch.3");
+assertEquals(1, f4_string('_one'.slice(1), 1), "fallthrough-string-switch.4");
+assertEquals(3, f4_string('_two'.slice(1), 2), "fallthrough-string-switch.5");
+
+// Oddball
+assertEquals(3, f4_string(null, 3), "fallthrough-string-switch.6");
+
+// Test for regression
+function regress_string(value) {
+  var json = 1;
+  switch (typeof value) {
+    case 'object':
+      break;
+
+    default:
+
+  }
+  return json;
+};
+assertEquals(1, regress_string('object'), 'regression-string');
 
 function f5(x) {
   switch(x) {
@@ -287,3 +325,138 @@
 var verylong = makeVeryLong(verylong_size);
 
 assertEquals(verylong_size * 2 + 1, verylong());
+
+//
+// Test suite below aims to cover all possible combinations of following:
+//
+//  clauses  |   tags   |   type feedback   |  optimization
+// =========================================================
+//  strings  |  symbol  |     all           |      on
+//  smis     |  string  |     target        |      off
+//  mixed    |  oddball |     non-target    |
+//           |  smis    |     none          |
+//           |  heapnum |                   |
+// =========================================================
+
+// Function-with-switch generator
+var test_id = 0,
+    clause_values = {
+      string: ['abc', 'def', 'ghi', 'jkl'],
+      smi: [1, 2, 3, 4],
+      mixed: ['abc', 1, 'def', 2, 'ghi', 3, 'jkl', 4]
+    };
+
+function switch_gen(clause_type, feedback, optimize) {
+  var values = clause_values[clause_type];
+
+  function opt(fn) {
+    if (feedback === 'all') {
+      values.forEach(fn);
+    } else if (Array.isArray(feedback)) {
+      // Non-target
+      values.filter(function(v) {
+        return feedback.indexOf(v) === -1;
+      }).forEach(fn);
+    } else if (feedback !== undefined) {
+      // Target
+      fn(feedback);
+    } else {
+      // None
+    }
+
+    if (optimize) %OptimizeFunctionOnNextCall(fn);
+
+    return fn;
+  };
+
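+  // The unique numbered string literal below makes every generated function
+  // textually distinct, presumably so the compilation cache cannot share
+  // type feedback between test cases.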
+  return opt(new Function(
+      'tag',
+      '"' + (test_id++) + '";' +
+      'switch(tag) {' +
+      values.map(function(value) {
+        return 'case ' + JSON.stringify(value) + ': return ' +
+               JSON.stringify('ok-' + value);
+      }).join(';') +
+      '}'
+  ));
+};
+
+function test_switch(clause_type, test_type, feedback, optimize) {
+  var pairs = [],
+      fn = switch_gen(clause_type, feedback, optimize);
+
+  if (Array.isArray(test_type)) {
+    pairs = test_type.map(function(v) {
+      return {
+        value: v,
+        expected: 'ok-' + v
+      };
+    });
+  } else if (test_type === 'symbols') {
+    pairs = clause_values.string.map(function(v) {
+      return {
+        value: v,
+        expected: clause_type !== 'smi' ? 'ok-' + v : undefined
+      };
+    });
+  } else if (test_type === 'strings') {
+    pairs = clause_values.string.map(function(v) {
+      return {
+        value: ('%%' + v).slice(2),
+        expected: clause_type !== 'smi' ? 'ok-' + v : undefined
+      };
+    });
+  } else if (test_type === 'oddball') {
+    pairs = [
+      { value: null, expected: undefined },
+      { value: NaN, expected: undefined },
+      { value: undefined, expected: undefined }
+    ];
+  } else if (test_type === 'smi') {
+    pairs = clause_values.smi.map(function(v) {
+      return {
+        value: v,
+        expected: clause_type !== 'string' ? 'ok-' + v : undefined
+      };
+    });
+  } else if (test_type === 'heapnum') {
+    pairs = clause_values.smi.map(function(v) {
+      return {
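+        // Equals v, but computed through non-integer intermediate values so
+        // that the result reaches the switch as a heap number, not a smi.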
+        value: ((v * 17)/16) - ((v*17)%16/16),
+        expected: clause_type !== 'string' ? 'ok-' + v : undefined
+      };
+    });
+  }
+
+  pairs.forEach(function(pair) {
+    assertEquals(pair.expected, fn(pair.value));
+  });
+};
+
+// test_switch(clause_type, test_type, feedback, optimize);
+
+function test_switches(opt) {
+  var test_types = ['symbols', 'strings', 'oddball', 'smi', 'heapnum'];
+
+  function test(clause_type) {
+    var values = clause_values[clause_type];
+
+    test_types.forEach(function(test_type) {
+      test_switch(clause_type, test_type, 'all', opt);
+      test_switch(clause_type, test_type, 'none', opt);
+
+      // Targeting specific clause feedback
+      values.forEach(function(value) {
+        test_switch(clause_type, test_type, [value], opt);
+        test_switch(clause_type, test_type, value, opt);
+      });
+    });
+  };
+
+  test('string');
+  test('smi');
+  test('mixed');
+};
+
+test_switches(false);
+test_switches(true);
diff --git a/test/mjsunit/to_number_order.js b/test/mjsunit/to_number_order.js
index d17e600..50e4bc7 100644
--- a/test/mjsunit/to_number_order.js
+++ b/test/mjsunit/to_number_order.js
@@ -161,7 +161,7 @@
 
 x = "";
 assertFalse(a > b, "Compare objects a > b");
-assertEquals("fiskhest", x, "Compare objects a > b valueOf order");
+assertEquals("hestfisk", x, "Compare objects a > b valueOf order");
 
 x = "";
 assertFalse(a > void(0), "Compare objects a > undefined");
@@ -195,7 +195,7 @@
 
   x = "";
   assertFalse(a > b, "Compare objects a > b");
-  assertEquals("fiskhest", x, "Compare objects a > b valueOf order");
+  assertEquals("hestfisk", x, "Compare objects a > b valueOf order");
 
   x = "";
   assertFalse(a > void(0), "Compare objects a > undefined");
diff --git a/test/mjsunit/unbox-double-arrays.js b/test/mjsunit/unbox-double-arrays.js
index feecaec..fd7db28 100644
--- a/test/mjsunit/unbox-double-arrays.js
+++ b/test/mjsunit/unbox-double-arrays.js
@@ -77,8 +77,6 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -89,8 +87,6 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -101,8 +97,6 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -113,20 +107,20 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
 
   function test_various_loads5(a, value_5, value_6, value_7) {
     assertTrue(%HasFastDoubleElements(a));
-    assertEquals(value_5, a[5]);
-    assertEquals(value_6, a[6]);
-    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    if (value_5 != undefined) {
+      assertEquals(value_5, a[5]);
+    };
+    if (value_6 != undefined) {
+      assertEquals(value_6, a[6]);
+      assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    }
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -137,8 +131,16 @@
     assertEquals(value_6, a[6]);
     assertEquals(value_6, a[computed_6()]); // Test non-constant key
     assertEquals(value_7, a[7]);
-    assertEquals(undefined, a[large_array_size-1]);
-    assertEquals(undefined, a[-1]);
+    assertEquals(large_array_size, a.length);
+    assertTrue(%HasFastDoubleElements(a));
+  }
+
+  function test_various_loads7(a, value_5, value_6, value_7) {
+    assertTrue(%HasFastDoubleElements(a));
+    assertEquals(value_5, a[5]);
+    assertEquals(value_6, a[6]);
+    assertEquals(value_6, a[computed_6()]); // Test non-constant key
+    assertEquals(value_7, a[7]);
     assertEquals(large_array_size, a.length);
     assertTrue(%HasFastDoubleElements(a));
   }
@@ -248,6 +250,8 @@
                       expected_array_value(7));
 
   // Make sure Crankshaft code handles the hole correctly (bailout)
+  var large_array = new allocator(large_array_size);
+  force_to_fast_double_array(large_array);
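+  // Use a fresh fast-double array so holes and values left behind by the
+  // previous store tests do not affect this run.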
   test_various_stores(large_array,
                       expected_array_value(5),
                       expected_array_value(6),
@@ -273,7 +277,12 @@
                       undefined,
                       expected_array_value(7));
 
+  %DeoptimizeFunction(test_various_loads6);
+  gc();
+
   // Test stores for non-NaN.
+  var large_array = new allocator(large_array_size);
+  force_to_fast_double_array(large_array);
   %OptimizeFunctionOnNextCall(test_various_stores);
   test_various_stores(large_array,
                       expected_array_value(5),
@@ -285,7 +294,19 @@
                       expected_array_value(6),
                       expected_array_value(7));
 
-  test_various_loads6(large_array,
+  test_various_loads7(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  test_various_loads7(large_array,
+                      expected_array_value(5),
+                      expected_array_value(6),
+                      expected_array_value(7));
+
+  %OptimizeFunctionOnNextCall(test_various_loads7);
+
+  test_various_loads7(large_array,
                       expected_array_value(5),
                       expected_array_value(6),
                       expected_array_value(7));
@@ -301,7 +322,7 @@
                       -NaN,
                       expected_array_value(7));
 
-  test_various_loads6(large_array,
+  test_various_loads7(large_array,
                       NaN,
                       -NaN,
                       expected_array_value(7));
@@ -317,7 +338,7 @@
                       -Infinity,
                       expected_array_value(7));
 
-  test_various_loads6(large_array,
+  test_various_loads7(large_array,
                       Infinity,
                       -Infinity,
                       expected_array_value(7));
@@ -434,7 +455,6 @@
 large_array3[4] = -Infinity;
 
 function call_apply() {
-  assertTrue(%HasFastDoubleElements(large_array3));
   called_by_apply.apply({}, large_array3);
 }
 
@@ -449,7 +469,6 @@
 function test_for_in() {
   // Due to previous tests, keys 0..25 and 95 should be present.
   next_expected = 0;
-  assertTrue(%HasFastDoubleElements(large_array3));
   for (x in large_array3) {
     assertTrue(next_expected++ == x);
     if (next_expected == 25) {
diff --git a/test/mjsunit/undeletable-functions.js b/test/mjsunit/undeletable-functions.js
index 04fd060..635ea6f 100644
--- a/test/mjsunit/undeletable-functions.js
+++ b/test/mjsunit/undeletable-functions.js
@@ -25,11 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test that we match JSC in making some functions undeletable.
-// See http://code.google.com/p/chromium/issues/detail?id=1717
-// The functions on these prototypes are not just undeletable.  It is
-// possible to override them with new definitions, then get the old
-// version back by deleting the new definition.
+// Test that we match ECMAScript in making most builtin functions
+// deletable and only specific ones undeletable or read-only.
 
 var array;
 
@@ -37,7 +34,7 @@
   "toString", "toLocaleString", "join", "pop", "push", "concat", "reverse",
   "shift", "unshift", "slice", "splice", "sort", "filter", "forEach", "some",
   "every", "map", "indexOf", "lastIndexOf", "reduce", "reduceRight"];
-CheckJSCSemantics(Array.prototype, array, "Array prototype");
+CheckEcmaSemantics(Array.prototype, array, "Array prototype");
 
 var old_Array_prototype = Array.prototype;
 var new_Array_prototype = {};
@@ -57,12 +54,12 @@
   "setUTCMinutes", "setHours", "setUTCHours", "setDate", "setUTCDate",
   "setMonth", "setUTCMonth", "setFullYear", "setUTCFullYear", "toGMTString",
   "toUTCString", "getYear", "setYear", "toISOString", "toJSON"];
-CheckJSCSemantics(Date.prototype, array, "Date prototype");
+CheckEcmaSemantics(Date.prototype, array, "Date prototype");
 
 array = [
   "random", "abs", "acos", "asin", "atan", "ceil", "cos", "exp", "floor", "log",
   "round", "sin", "sqrt", "tan", "atan2", "pow", "max", "min"];
-CheckJSCSemantics(Math, array, "Math1");
+CheckEcmaSemantics(Math, array, "Math1");
 
 CheckEcmaSemantics(Date, ["UTC", "parse", "now"], "Date");
 
@@ -76,6 +73,8 @@
   "execScript"];
 CheckEcmaSemantics(this, array, "Global");
 CheckReadOnlyAttr(this, "Infinity");
+CheckReadOnlyAttr(this, "NaN");
+CheckReadOnlyAttr(this, "undefined");
 
 array = ["exec", "test", "toString", "compile"];
 CheckEcmaSemantics(RegExp.prototype, array, "RegExp prototype");
@@ -112,7 +111,7 @@
   "toUpperCase", "toLocaleUpperCase", "link", "anchor", "fontcolor", "fontsize",
   "big", "blink", "bold", "fixed", "italics", "small", "strike", "sub", "sup",
   "toJSON", "toString", "valueOf"];
-CheckJSCSemantics(String.prototype, array, "String prototype");
+CheckEcmaSemantics(String.prototype, array, "String prototype");
 CheckEcmaSemantics(String, ["fromCharCode"], "String");
 
 
@@ -124,14 +123,6 @@
 }
 
 
-function CheckJSCSemantics(type, props, name) {
-  print(name);
-  for (var i = 0; i < props.length; i++) {
-    CheckNotDeletable(type, props[i]);
-  }
-}
-
-
 function CheckDontDelete(type, props, name) {
   print(name);
   for (var i = 0; i < props.length; i++) {
@@ -154,21 +145,6 @@
 }
 
 
-function CheckNotDeletable(type, prop) {
-  var old = type[prop];
-  if (!type[prop]) return;
-  assertTrue(type.hasOwnProperty(prop), "inherited: " + prop);
-  var deleted = delete type[prop];
-  assertTrue(deleted, "delete operator returned false: " + prop);
-  assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
-  type[prop] = "foo";
-  assertEquals("foo", type[prop], "not overwritable: " + prop);
-  deleted = delete type[prop];
-  assertTrue(deleted, "delete operator returned false 2nd time: " + prop);
-  assertEquals(old.toString(), type[prop].toString(), "delete didn't restore the old value: " + prop);
-}
-
-
 function CheckDontDeleteAttr(type, prop) {
   var old = type[prop];
   if (!type[prop]) return;
@@ -189,7 +165,7 @@
   assertFalse(deleted, "delete operator returned true: " + prop);
   assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
   type[prop] = "foo";
-  assertEquals("foo", type[prop], "overwritable: " + prop);
+  assertEquals(old, type[prop], "overwritable: " + prop);
 }
 
 print("OK");
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 3a27130..e31a630 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -1,4 +1,4 @@
-# Copyright 2009 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -69,7 +69,6 @@
 ecma_3/Date/15.9.3.2-1: SKIP
 js1_2/function/Number: SKIP
 
-
 ##################### SLOW TESTS #####################
 
 # This takes a long time to run (~100 seconds). It should only be run
@@ -227,7 +226,7 @@
 ecma/String/15.5.4.12-5: FAIL_OK
 
 # Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: FAIL || TIMEOUT
+js1_5/Regress/regress-312588: SKIP
 
 
 # Runs out of memory because it compiles huge functions.
@@ -301,6 +300,11 @@
 js1_2/regexp/beginLine: FAIL_OK
 js1_2/regexp/endLine: FAIL_OK
 
+# We no longer let calls to test and exec with no argument implicitly
+# use the previous input.
+js1_2/regexp/RegExp_input: FAIL_OK
+js1_2/regexp/RegExp_input_as_array: FAIL_OK
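+# (Illustration: after /x/.test("x"), a later call to /x/.test() no longer
+# reuses the previous input; the missing argument is coerced to "undefined".)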
+
 
 # To be compatible with safari typeof a regexp yields 'function';
 # in firefox it yields 'object'.
@@ -411,12 +415,6 @@
 js1_5/extensions/regress-455413: FAIL_OK
 
 
-# The spec specifies reverse evaluation order for < and >=.
-# See section 11.8.2 and 11.8.5.
-# We implement the spec here but the test tests the more straigtforward order.
-ecma_3/Operators/order-01: FAIL_OK
-
-
 # Uses Mozilla-specific QName, XML, XMLList and Iterator.
 js1_5/Regress/regress-407323: FAIL_OK
 js1_5/Regress/regress-407957: FAIL_OK
@@ -619,6 +617,10 @@
 # We do not correctly handle assignments within "with"
 /ecma_3/Statements/12.10-01: FAIL
 
+# We do not throw an exception when a const is redeclared.
+# (We only fail section 1 of the test.)
+js1_5/Regress/regress-103602: FAIL
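+# (Illustration: the test expects "const x = 1; const x = 2;" to throw for
+# the redeclaration; V8 accepts it silently.)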
+
 ##################### MOZILLA EXTENSION TESTS #####################
 
 ecma/extensions/15.1.2.1-1: FAIL_OK
diff --git a/test/preparser/strict-identifiers.pyt b/test/preparser/strict-identifiers.pyt
index 72808e2..aa3d521 100644
--- a/test/preparser/strict-identifiers.pyt
+++ b/test/preparser/strict-identifiers.pyt
@@ -138,6 +138,38 @@
   var x = {set foo($id) { }};
 """)
 
+label_normal = Template("label-normal-$id", """
+  $id: '';
+""")
+
+label_strict = StrictTemplate("label-strict-$id", """
+  $id: '';
+""")
+
+break_normal = Template("break-normal-$id", """
+  for (;;) {
+    break $id;
+  }
+""")
+
+break_strict = StrictTemplate("break-strict-$id", """
+  for (;;) {
+    break $id;
+  }
+""")
+
+continue_normal = Template("continue-normal-$id", """
+  for (;;) {
+    continue $id;
+  }
+""")
+
+continue_strict = StrictTemplate("continue-strict-$id", """
+  for (;;) {
+    continue $id;
+  }
+""")
+
 non_strict_use = Template("nonstrict-$id", """
   var $id = 42;
   $id++;
@@ -162,6 +194,7 @@
   function $id($id) { }
   x = {$id: 42};
   x = {get $id() {}, set $id(value) {}};
+  $id: '';
 """)
 
 identifier_name_source = """
@@ -197,6 +230,12 @@
   prefix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_prefix")
   postfix_var({"id": id, "op":"++", "opname":"inc"}, "strict_lhs_postfix")
   postfix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_postfix")
+  label_normal({"id": id}, None)
+  label_strict({"id": id}, None)
+  break_normal({"id": id}, None)
+  break_strict({"id": id}, None)
+  continue_normal({"id": id}, None)
+  continue_strict({"id": id}, None)
   non_strict_use({"id": id}, None)
 
 
@@ -205,10 +244,13 @@
 for reserved_word in reserved_words + strict_reserved_words:
   if (reserved_word in strict_reserved_words):
     message = "strict_reserved_word"
+    label_message = None
   elif (reserved_word == "const"):
     message = "unexpected_token"
+    label_message = message
   else:
     message = "reserved_word"
+    label_message = message
   arg_name_own({"id":reserved_word}, message)
   arg_name_nested({"id":reserved_word}, message)
   setter_arg({"id": reserved_word}, message)
@@ -225,6 +267,19 @@
   read_var({"id": reserved_word}, message)
   identifier_name({"id": reserved_word}, None);
   identifier_name_strict({"id": reserved_word}, None);
+  label_normal({"id": reserved_word}, label_message)
+  break_normal({"id": reserved_word}, label_message)
+  continue_normal({"id": reserved_word}, label_message)
+  if (reserved_word == "const"):
+    # The error message for this case is different because
+    # ParseLabelledStatementOrExpression will try to parse this as an expression
+    # first, effectively disallowing the use in ParseVariableDeclarations, i.e.
+    # the preparser never sees that 'const' was intended to be a label.
+    label_strict({"id": reserved_word}, "strict_const")
+  else:
+    label_strict({"id": reserved_word}, message)
+  break_strict({"id": reserved_word}, message)
+  continue_strict({"id": reserved_word}, message)
 
 
 # Future reserved words in strict mode behave like normal identifiers
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index 868509d..fb6d951 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -30,10 +30,6 @@
 
 ############################### BUGS ###################################
 
-# A bound function should fail on access to 'caller' and 'arguments'.
-S15.3.4.5_A1: FAIL
-S15.3.4.5_A2: FAIL
-
 # '__proto__' should be treated as a normal property in JSON.
 S15.12.2_A1: FAIL
 
@@ -46,11 +42,8 @@
 S15.8.2.18_A7: PASS || FAIL_OK
 S15.8.2.13_A23: PASS || FAIL_OK
 
-# We allow calls to regexp exec() with no arguments to fail for
-# compatibility reasons.
-S15.10.6.2_A1_T16: FAIL_OK
-S15.10.6.2_A12: FAIL_OK
-S15.10.6.3_A1_T16: FAIL_OK
+# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp.
+S15.10.6_A2: FAIL_OK
 
 # We are silent in some regexp cases where the spec wants us to give
 # errors, for compatibility.
@@ -159,6 +152,10 @@
 S9.9_A1: FAIL_OK
 S9.9_A2: FAIL_OK
 
+# The expected evaluation order of comparison operations changed.
+S11.8.2_A2.3_T1: FAIL_OK
+S11.8.3_A2.3_T1: FAIL_OK
+
 # Calls builtins without an explicit receiver which means that
 # undefined is passed to the builtin. The tests expect the global
 # object to be passed which was true in ES3 but not in ES5.
@@ -176,6 +173,23 @@
 S15.5.4.14_A1_T3: FAIL_OK
 S15.5.4.15_A1_T3: FAIL_OK
 
+# NaN, Infinity and undefined are read-only according to ES5.
+S15.1.1.1_A2_T1: FAIL_OK  # NaN
+S15.1.1.1_A2_T2: FAIL_OK  # NaN
+S15.1.1.2_A2_T1: FAIL_OK  # Infinity
+# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. Sputnik bug #45.
+S15.1.1.3_A2_T1: FAIL_OK  # undefined
+S15.1.1.3_A2_T2: FAIL_OK  # undefined
+
+# Function.prototype.apply can handle arbitrary object as argument list.
+S15.3.4.3_A6_T1: FAIL_OK
+S15.3.4.3_A6_T4: FAIL_OK
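+# (Illustration: ES5 allows f.apply(null, {0: "a", 1: "b", length: 2});
+# ES3 required an Array or Arguments object as the argument list.)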
+
+# Array.prototype.to[Locale]String is generic in ES5.
+S15.4.4.2_A2_T1: FAIL_OK
+S15.4.4.3_A2_T1: FAIL_OK
+
+
 ##################### SKIPPED TESTS #####################
 
 # These tests take a looong time to run in debug mode.
@@ -194,53 +208,6 @@
 # Invalid test case (recent change adding var changes semantics)
 S15.3_A3_T3: FAIL
 
-# These tests fail because we had to add bugs to be compatible with JSC.  See
-# http://code.google.com/p/chromium/issues/detail?id=1717
-S15.5.4.1_A1_T2: FAIL_OK
-S15.5.4_A1: FAIL_OK
-S15.5.4_A3: FAIL_OK
-S15.9.5.10_A1_T2: FAIL_OK
-S15.9.5.11_A1_T2: FAIL_OK
-S15.9.5.12_A1_T2: FAIL_OK
-S15.9.5.13_A1_T2: FAIL_OK
-S15.9.5.14_A1_T2: FAIL_OK
-S15.9.5.15_A1_T2: FAIL_OK
-S15.9.5.16_A1_T2: FAIL_OK
-S15.9.5.17_A1_T2: FAIL_OK
-S15.9.5.18_A1_T2: FAIL_OK
-S15.9.5.19_A1_T2: FAIL_OK
-S15.9.5.20_A1_T2: FAIL_OK
-S15.9.5.21_A1_T2: FAIL_OK
-S15.9.5.22_A1_T2: FAIL_OK
-S15.9.5.23_A1_T2: FAIL_OK
-S15.9.5.24_A1_T2: FAIL_OK
-S15.9.5.25_A1_T2: FAIL_OK
-S15.9.5.26_A1_T2: FAIL_OK
-S15.9.5.27_A1_T2: FAIL_OK
-S15.9.5.28_A1_T2: FAIL_OK
-S15.9.5.29_A1_T2: FAIL_OK
-S15.9.5.2_A1_T2: FAIL_OK
-S15.9.5.30_A1_T2: FAIL_OK
-S15.9.5.31_A1_T2: FAIL_OK
-S15.9.5.32_A1_T2: FAIL_OK
-S15.9.5.33_A1_T2: FAIL_OK
-S15.9.5.34_A1_T2: FAIL_OK
-S15.9.5.35_A1_T2: FAIL_OK
-S15.9.5.36_A1_T2: FAIL_OK
-S15.9.5.37_A1_T2: FAIL_OK
-S15.9.5.38_A1_T2: FAIL_OK
-S15.9.5.39_A1_T2: FAIL_OK
-S15.9.5.3_A1_T2: FAIL_OK
-S15.9.5.40_A1_T2: FAIL_OK
-S15.9.5.41_A1_T2: FAIL_OK
-S15.9.5.42_A1_T2: FAIL_OK
-S15.9.5.4_A1_T2: FAIL_OK
-S15.9.5.5_A1_T2: FAIL_OK
-S15.9.5.6_A1_T2: FAIL_OK
-S15.9.5.7_A1_T2: FAIL_OK
-S15.9.5.8_A1_T2: FAIL_OK
-S15.9.5.9_A1_T2: FAIL_OK
-
 [ $arch == arm ]
 
 # BUG(3251225): Tests that timeout with --nocrankshaft.
diff --git a/test/test262/README b/test/test262/README
index ea6b4a7..094356f 100644
--- a/test/test262/README
+++ b/test/test262/README
@@ -4,11 +4,11 @@
 
   http://hg.ecmascript.org/tests/test262
 
-at revision 128 as 'data' in this directory.  Using later version
+at revision 271 as 'data' in this directory.  Using later version
 may be possible but the tests are only known to pass (and indeed run)
 with that revision.
 
-hg clone -r 128 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 271 http://hg.ecmascript.org/tests/test262 data
 
 If you do update to a newer revision you may have to change the test
 harness adapter code since it uses internal functionality from the
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 8cee210..2ad5746 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -25,491 +25,368 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+prefix test262
+def FAIL_OK = FAIL, OKAY
 
-#
-# ietestcenter tests.
-#
+############################### BUGS ###################################
 
-prefix ietestcenter
+# '__proto__' should be treated as a normal property in JSON.
+S15.12.2_A1: FAIL
 
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1196
+S8.7_A5_T2: FAIL
 
-#
-# Deliberate differences for compatibility with other browsers
-#
-# 15.9.5.43-0-9 and 15.9.5.43-0-10. V8 doesn't throw RangeError
-# from Date.prototype.toISOString when string is not a finite number.
-# This is compatible with Firefox and Safari.
-15.9.5.43-0-9: PASS || FAIL
-15.9.5.43-0-10: PASS || FAIL
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1624
+S10.4.2.1_A1: FAIL
 
-#
-# Unanalyzed failures which may be bugs or deliberate differences
-#
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1530
+S15.3.3.1_A4: FAIL
 
-# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
-#      Property of the Global Object under strict mode (NaN)
-10.2.1.1.3-4-16-s: FAIL
-# Bug? Strict Mode - TypeError is thrown when changing the value of a Value
-#      Property of the Global Object under strict mode (undefined)
-10.2.1.1.3-4-18-s: FAIL
-# Invalid test: https://bugs.ecmascript.org/show_bug.cgi?id=76
-10.4.2-2-c-1: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#      when using Greater-than operator: valueOf > valueOf
-11.8.2-1: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#      when using Greater-than operator: valueOf > toString
-11.8.2-2: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#      when using Greater-than operator: toString > valueOf
-11.8.2-3: FAIL
-# BUG: 11.8.2 Greater-than Operator - Partial left to right order enforced
-#       when using Greater-than operator: toString > toString
-11.8.2-4: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: valueOf <= valueOf
-11.8.3-1: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: valueOf <= toString
-11.8.3-2: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: toString <= valueOf
-11.8.3-3: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: toString <= toString
-11.8.3-4: FAIL
-# BUG: 11.8.3 Less-than-or-equal Operator - Partial left to right order
-#      enforced when using Less-than-or-equal operator: valueOf <= valueOf
-11.8.3-5: FAIL
-# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
-#      in strict mode (Global.undefined)
-11.13.1-4-27-s: FAIL
-# Bug? simple assignment throws TypeError if LeftHandSide is a readonly property
-#      in strict mode (Global.Infinity)
-11.13.1-4-3-s: FAIL
-# BUG: Global.NaN is a data property with default attribute values
-15.1.1.1-0: FAIL
-# BUG: Global.Infinity is a data property with default attribute values
-15.1.1.2-0: FAIL
-# BUG: Global.undefined is a data property with default attribute values
-15.1.1.3-0: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (Global.NaN)
-15.2.3.3-4-178: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (Global.Infinity)
-15.2.3.3-4-179: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (Global.undefined)
-15.2.3.3-4-180: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.source)
-# There is no RegExp.prototype.source
-15.2.3.3-4-212: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.global)
-# There is no RegExp.prototype.global
-15.2.3.3-4-213: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.ignoreCase)
-# There is no RegExp.prototype.ignoreCase
-15.2.3.3-4-214: FAIL
-# BUG: Object.getOwnPropertyDescriptor returns data desc (all false)
-#      for properties on built-ins (RegExp.prototype.multiline)
-15.2.3.3-4-215: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
-#      property to true successfully when [[Enumerable]] attribute of 'name'
-#      is false and [[Configurable]] attribute of 'name' is true,  the 'desc'
-#      is a generic descriptor which only contains [[Enumerable]] attribute
-#      as true, 'name' property is an index data property (8.12.9 step 8)
-15.2.3.6-4-82-18: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true,  the 'desc' is a generic
-#      descriptor which only contains [Enumerable]] attribute as false and
-#      'name' property is an index accessor property (8.12.9 step 8)
-15.2.3.6-4-82-19: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true,  the 'desc' is a generic
-#      descriptor which contains [Enumerable]] attribute as false and
-#      [[Configurable]] property is true, 'name' property is an index accessor
-#      property (8.12.9 step 8)
-15.2.3.6-4-82-20: FAIL
-# Bug? Object.defineProperty - Update [[Configurable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true, the 'desc' is a generic
-#      descriptor which only contains [[Configurable]] attribute as false,
-#      'name' property is an index accessor property (8.12.9 step 8)
-15.2.3.6-4-82-21: FAIL
-# Bug? Object.defineProperty - Update [[Configurable]] attribute of 'name'
-#      property to false successfully when [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property are true, the 'desc' is a generic
-#      descriptor which contains [[Enumerable]] attribute as true and
-#      [[Configurable]] attribute is false, 'name' property is an index accessor
-#      property (8.12.9 step 8)
-15.2.3.6-4-82-22: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] and [[Configurable]]
-#      attributes of 'name' property to false successfully when [[Enumerable]]
-#      and [[Configurable]] attributes of 'name' property are true, the 'desc'
-#      is a generic descriptor which contains [[Enumerable]] and
-#      [[Configurable]] attributes as false, 'name' property is an index
-#      accessor property (8.12.9 step 8)
-15.2.3.6-4-82-23: FAIL
-# Bug? Object.defineProperty - Update [[Enumerable]] attributes of 'name'
-#      property to true successfully when [[Enumerable]] attribute of 'name' is
-#      false and [[Configurable]] attribute of 'name' is true, the 'desc' is a
-#      generic descriptor which only contains [[Enumerable]] attribute as true,
-#      'name' property is an index accessor property (8.12.9 step 8)
-15.2.3.6-4-82-24: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, test the length property of 'O'
-#      is own data property (15.4.5.1 step 1)
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1756
 15.2.3.6-4-116: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, test the length property of 'O'
-#      is own data property that overrides an inherited data property (15.4.5.1
-#      step 1)
 15.2.3.6-4-117: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test that RangeError exception is thrown when [[Value]] field of
-#      'desc' is undefined (15.4.5.1 step 3.c)
 15.2.3.6-4-125: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is null (15.4.5.1 step 3.c)
 15.2.3.6-4-126: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a boolean with value false
-#      (15.4.5.1 step 3.c)
 15.2.3.6-4-127: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a boolean with value true
-#      (15.4.5.1 step 3.c)
 15.2.3.6-4-128: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is 0 (15.4.5.1 step 3.c)
 15.2.3.6-4-129: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is +0 (15.4.5.1 step 3.c)
 15.2.3.6-4-130: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is -0 (15.4.5.1 step 3.c)
 15.2.3.6-4-131: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is a positive number (15.4.5.1 step 3.c)
 15.2.3.6-4-132: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a negative number (15.4.5.1 step 3.c)
 15.2.3.6-4-133: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is +Infinity (15.4.5.1 step 3.c)
 15.2.3.6-4-134: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is -Infinity (15.4.5.1 step 3.c)
 15.2.3.6-4-135: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is NaN (15.4.5.1 step 3.c)
 15.2.3.6-4-136: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is not thrown when the [[Value]] field of
-#      'desc' is a string containing a positive number (15.4.5.1 step 3.c)
 15.2.3.6-4-137: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing a negative number (15.4.5.1 step 3.c)
 15.2.3.6-4-138: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing a decimal number (15.4.5.1 step 3.c)
 15.2.3.6-4-139: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing +Infinity (15.4.5.1 step 3.c)
 15.2.3.6-4-140: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string containing -Infinity (15.4.5.1 step 3.c)
 15.2.3.6-4-141: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string containing an
-#      exponential number (15.4.5.1 step 3.c)
 15.2.3.6-4-142: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string containing a hex
-#      number (15.4.5.1 step 3.c)
 15.2.3.6-4-143: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string containing a number
-#      with leading zeros (15.4.5.1 step 3.c)
 15.2.3.6-4-144: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError exception is thrown when the [[Value]] field of
-#      'desc' is a string which doesn't convert to a number (15.4.5.1 step 3.c)
 15.2.3.6-4-145: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an object which has an own
-#      toString method (15.4.5.1 step 3.c)
 15.2.3.6-4-146: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method (15.4.5.1 step 3.c)
 15.2.3.6-4-147: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method that returns an object and toString method that returns a
-#      string (15.4.5.1 step 3.c)
 15.2.3.6-4-148: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      toString and valueOf method (15.4.5.1 step 3.c)
 15.2.3.6-4-149: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test TypeError is thrown when the [[Value]] field of 'desc' is an
-#      Object that both toString and valueOf wouldn't return primitive value
-#      (15.4.5.1 step 3.c)
 15.2.3.6-4-150: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', and the [[Value]] field of 'desc' is an Object with an own toString
-#      method and an inherited valueOf method (15.4.5.1 step 3.c), test that the
-#      inherited valueOf method is used
 15.2.3.6-4-151: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError is thrown when the [[Value]] field of 'desc' is a
-#      positive non-integer values (15.4.5.1 step 3.c)
 15.2.3.6-4-152: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length prosperty
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is a
-#      negative non-integer values (15.4.5.1 step 3.c)
 15.2.3.6-4-153: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 2
-#      (15.4.5.1 step 3.c)
 15.2.3.6-4-154: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 1
-#      (15.4.5.1 step 3.c)
 15.2.3.6-4-155: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 (15.4.5.1 step 3.c)
 15.2.3.6-4-156: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 + 1 (15.4.5.1 step 3.c)
 15.2.3.6-4-157: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', set the [[Value]] field of 'desc' to a value greater than the
-#      existing value of length (15.4.5.1 step 3.f)
 15.2.3.6-4-159: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', set the [[Value]] field of 'desc' to a value lesser than the
-#      existing value of length and test that indexes beyond the new length are
-#      deleted(15.4.5.1 step 3.f)
 15.2.3.6-4-161: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h)
 15.2.3.6-4-165: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is true (15.4.5.1 step 3.h)
 15.2.3.6-4-166: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is false (15.4.5.1 step 3.i.ii)
 15.2.3.6-4-167: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', whose writable attribute is being changed to false and the [[Value]]
-#      field of 'desc' is less than value of the length property and also lesser
-#      than an index of the array which is set to configurable:false, test that
-#      new length is set to a value greater than the non-deletable index by 1,
-#      writable attribute of length is set to false and TypeError exception is
-#      thrown (15.4.5.1 step 3.i.iii)
 15.2.3.6-4-168: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property and also lesser than an index of the array which is set to
-#      configurable: false, test that new length is set to a value greater than
-#      the non-deletable index by 1, and TypeError is thrown (15.4.5.1 step
-#      3.l.i)
 15.2.3.6-4-169: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property and also lesser than an index of the array which is set to
-#      configurable: false, test that new length is set to a value greater than
-#      the non-deletable index by 1, writable attribute of length is set to
-#      false and TypeError exception is thrown (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-170: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of an inherited data
-#      property with large index named in 'O' can't stop deleting index named
-#      properties (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-171: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides an inherited data property can
-#      stop deleting index named properties (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-172: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides an inherited accessor property
-#      can stop deleting index named properties (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-173: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' can stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-174: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of an inherited accessor
-#      property with large index named in 'O' can't stop deleting index named
-#      properties (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-175: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides an inherited data property
-#      can stop deleting index named properties (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-176: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides an inherited accessor
-#      property can stop deleting index named properties (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-177: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the configurable large index named property of 'O' is
-#      deleted (15.4.5.1 step 3.l.ii)
 15.2.3.6-4-178: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is greater than value of the length
-#      property, test value of the length property is same as [[Value]]
-#      (15.4.5.1 step 3.l.iii.1)
 15.2.3.6-4-179-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last when the [[Writable]] field of 'desc' is false and 'O'
-#      doesn't contain non-configurable large index named property (15.4.5.1
-#      step 3.m)
 15.2.3.6-4-181: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, 'name' is boundary value 2^32 - 2 (15.4.5.1 step 4.a)
 15.2.3.6-4-183: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test TypeError is thrown if the [[Writable]] attribute of the
-#      length property in 'O' is false and value of 'name' equals to value of
-#      the length property (15.4.5.1 step 4.b)
 15.2.3.6-4-188: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test TypeError is thrown if the [[Writable]] attribute of the
-#      length property in 'O' is false and value of 'name' is greater than value
-#      of the length property (15.4.5.1 step 4.b)
 15.2.3.6-4-189: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, 'desc' is accessor descriptor, test updating all attribute
-#      values of 'name' (15.4.5.1 step 4.c)
-15.2.3.6-4-209: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, name is accessor property and 'desc' is accessor descriptor,
-#      test updating the [[Enumerable]] attribute value of 'name' (15.4.5.1 step
-#      4.c)
-15.2.3.6-4-271: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, name is accessor property and 'desc' is accessor descriptor,
-#      test updating the [[Configurable]] attribute value of 'name' (15.4.5.1
-#      step 4.c)
-15.2.3.6-4-272: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, name is accessor property and 'desc' is accessor descriptor,
-#      test updating multiple attribute values of 'name' (15.4.5.1 step 4.c)
-15.2.3.6-4-273: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('name') + 1
-#      if ToUint32('name') equals to value of the length property in 'O'
-#      (15.4.5.1 step 4.e.ii)
 15.2.3.6-4-275: FAIL
-# Bug? Object.defineProperty - 'O' is an Array, 'name' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('name') + 1
-#      if ToUint32('name') is greater than value of the length property in 'O'
-#      (15.4.5.1 step 4.e.ii)
 15.2.3.6-4-276: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own accessor property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', and 'desc' is accessor descriptor,
-#      test updating multiple attribute values of 'name' (10.6
-#      [[DefineOwnProperty]] step 3 and 5.a.i)
-15.2.3.6-4-291-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object, 'name' is own
-#      accessor property of 'O', and 'desc' is accessor descriptor, test
-#      updating multiple attribute values of 'name' (10.6 [[DefineOwnProperty]]
-#      step 3)
-15.2.3.6-4-291: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own property of 'O' which is also defined in
-#      [[ParameterMap]] of 'O', and 'desc' is data descriptor, test updating
-#      multiple attribute values of 'name' (10.6 [[DefineOwnProperty]] step 3
-#      and 5.b)
 15.2.3.6-4-292-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Value]] attribute value of 'name' which is defined as
-#      unwritable and non-configurable (10.6 [[DefineOwnProperty]] step 4 and
-#      step 5b)
 15.2.3.6-4-293-2: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is not thrown when
-#      updating the [[Value]] attribute value of 'name' which is defined as
-#      non-writable and configurable (10.6 [[DefineOwnProperty]] step 3 and step
-#      5.b)
 15.2.3.6-4-293-3: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Writable]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and 5b)
 15.2.3.6-4-294-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Enumerable]] attribute value of 'name' which is defined as
-#      non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5b)
 15.2.3.6-4-295-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object of a function that has
-#      formal parameters, 'name' is own data property of 'O' which is also
-#      defined in [[ParameterMap]] of 'O', test TypeError is thrown when
-#      updating the [[Configurable]] attribute value of 'name' which is defined
-#      as non-configurable (10.6 [[DefineOwnProperty]] step 4 and step 5b)
 15.2.3.6-4-296-1: FAIL
-# Bug? Object.defineProperty - 'O' is an Arguments object, 'name' is an index
-#      named accessor property of 'O' but not defined in [[ParameterMap]] of
-#      'O', and 'desc' is accessor descriptor, test updating multiple attribute
-#      values of 'name' (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.6-4-303: FAIL
-# Bug? ES5 Attributes - indexed property 'P' with attributes [[Writable]]: true,
-#      [[Enumerable]]: true, [[Configurable]]: false is writable using simple
-#      assignment, 'O' is an Arguments object
 15.2.3.6-4-333-11: FAIL
+15.2.3.6-4-360-1: FAIL
+15.2.3.6-4-360-6: FAIL
+15.2.3.6-4-360-7: FAIL
+15.2.3.6-4-405: FAIL
+15.2.3.6-4-410: FAIL
+15.2.3.6-4-415: FAIL
+15.2.3.6-4-420: FAIL
+15.2.3.7-6-a-112: FAIL
+15.2.3.7-6-a-113: FAIL
+15.2.3.7-6-a-122: FAIL
+15.2.3.7-6-a-123: FAIL
+15.2.3.7-6-a-124: FAIL
+15.2.3.7-6-a-125: FAIL
+15.2.3.7-6-a-126: FAIL
+15.2.3.7-6-a-127: FAIL
+15.2.3.7-6-a-128: FAIL
+15.2.3.7-6-a-133: FAIL
+15.2.3.7-6-a-138: FAIL
+15.2.3.7-6-a-139: FAIL
+15.2.3.7-6-a-140: FAIL
+15.2.3.7-6-a-142: FAIL
+15.2.3.7-6-a-143: FAIL
+15.2.3.7-6-a-144: FAIL
+15.2.3.7-6-a-145: FAIL
+15.2.3.7-6-a-147: FAIL
+15.2.3.7-6-a-150: FAIL
+15.2.3.7-6-a-151: FAIL
+15.2.3.7-6-a-155: FAIL
+15.2.3.7-6-a-157: FAIL
+15.2.3.7-6-a-161: FAIL
+15.2.3.7-6-a-162: FAIL
+15.2.3.7-6-a-163: FAIL
+15.2.3.7-6-a-164: FAIL
+15.2.3.7-6-a-165: FAIL
+15.2.3.7-6-a-166: FAIL
+15.2.3.7-6-a-167: FAIL
+15.2.3.7-6-a-168: FAIL
+15.2.3.7-6-a-169: FAIL
+15.2.3.7-6-a-170: FAIL
+15.2.3.7-6-a-171: FAIL
+15.2.3.7-6-a-172: FAIL
+15.2.3.7-6-a-173: FAIL
+15.2.3.7-6-a-174: FAIL
+15.2.3.7-6-a-175: FAIL
+15.2.3.7-6-a-176: FAIL
+15.2.3.7-6-a-177: FAIL
+15.2.3.7-6-a-121: FAIL
+15.2.3.7-6-a-130: FAIL
+15.2.3.7-6-a-129: FAIL
+15.2.3.7-6-a-131: FAIL
+15.2.3.7-6-a-132: FAIL
+15.2.3.7-6-a-136: FAIL
+15.2.3.7-6-a-135: FAIL
+15.2.3.7-6-a-134: FAIL
+15.2.3.7-6-a-137: FAIL
+15.2.3.7-6-a-141: FAIL
+15.2.3.7-6-a-146: FAIL
+15.2.3.7-6-a-148: FAIL
+15.2.3.7-6-a-149: FAIL
+15.2.3.7-6-a-152: FAIL
+15.2.3.7-6-a-153: FAIL
+15.2.3.7-6-a-179: FAIL
+15.2.3.7-6-a-184: FAIL
+15.2.3.7-6-a-185: FAIL
+15.2.3.7-6-a-264: FAIL
+15.2.3.7-6-a-265: FAIL
+15.4.4.14-9-b-i-11: FAIL
+15.4.4.14-9-b-i-13: FAIL
+15.4.4.14-9-b-i-17: FAIL
+15.4.4.14-9-b-i-19: FAIL
+15.4.4.14-9-b-i-28: FAIL
+15.4.4.14-9-b-i-30: FAIL
+15.4.4.15-8-a-14: FAIL
+15.4.4.15-8-b-i-11: FAIL
+15.4.4.15-8-b-i-13: FAIL
+15.4.4.15-8-b-i-17: FAIL
+15.4.4.15-8-b-i-28: FAIL
+15.4.4.15-8-b-i-30: FAIL
+15.4.4.16-7-c-i-10: FAIL
+15.4.4.16-7-c-i-12: FAIL
+15.4.4.16-7-c-i-14: FAIL
+15.4.4.16-7-c-i-18: FAIL
+15.4.4.16-7-c-i-20: FAIL
+15.4.4.16-7-c-i-28: FAIL
+15.4.4.17-7-c-i-10: FAIL
+15.4.4.17-7-c-i-12: FAIL
+15.4.4.17-7-c-i-14: FAIL
+15.4.4.17-7-c-i-18: FAIL
+15.4.4.17-7-c-i-20: FAIL
+15.4.4.17-7-c-i-28: FAIL
+15.4.4.18-7-c-i-10: FAIL
+15.4.4.18-7-c-i-12: FAIL
+15.4.4.18-7-c-i-14: FAIL
+15.4.4.18-7-c-i-18: FAIL
+15.4.4.18-7-c-i-20: FAIL
+15.4.4.18-7-c-i-28: FAIL
+15.4.4.19-8-c-i-10: FAIL
+15.4.4.19-8-c-i-14: FAIL
+15.4.4.19-8-c-i-12: FAIL
+15.4.4.19-8-c-i-18: FAIL
+15.4.4.19-8-c-i-19: FAIL
+15.4.4.19-8-c-i-28: FAIL
+15.4.4.20-9-c-i-10: FAIL
+15.4.4.20-9-c-i-12: FAIL
+15.4.4.20-9-c-i-14: FAIL
+15.4.4.20-9-c-i-18: FAIL
+15.4.4.20-9-c-i-20: FAIL
+15.4.4.20-9-c-i-28: FAIL
+15.4.4.22-8-b-2: FAIL
+15.4.4.22-8-b-iii-1-12: FAIL
+15.4.4.22-8-b-iii-1-18: FAIL
+15.4.4.22-8-b-iii-1-20: FAIL
+15.4.4.22-8-b-iii-1-33: FAIL
+15.4.4.22-8-b-iii-1-30: FAIL
+15.4.4.22-9-b-13: FAIL
+15.4.4.22-9-b-24: FAIL
+15.4.4.22-9-b-26: FAIL
+15.4.4.22-9-b-9: FAIL
+15.4.4.22-9-c-i-30: FAIL
+
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1772
+15.2.3.6-4-292-1: FAIL
+15.2.3.6-4-293-2: FAIL
+15.2.3.6-4-293-3: FAIL
+15.2.3.6-4-294-1: FAIL
+15.2.3.6-4-295-1: FAIL
+15.2.3.6-4-296-1: FAIL
+15.2.3.6-4-333-11: FAIL
+15.2.3.7-6-a-281: FAIL
+15.2.3.7-6-a-282: FAIL
+15.2.3.7-6-a-283: FAIL
+15.2.3.7-6-a-284: FAIL
+15.2.3.7-6-a-285: FAIL
+
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1790
+15.4.4.22-9-9: FAIL
+
+# Invalid test cases (recent change adding var changes semantics)
+S8.3_A1_T1: FAIL
+S15.3_A3_T1: FAIL
+S15.3_A3_T3: FAIL
+
+##################### DELIBERATE INCOMPATIBILITIES #####################
+
+# We deliberately treat arguments to parseInt() with a leading zero as
+# octal numbers in order to not break the web.
+S15.1.2.2_A5.1_T1: FAIL_OK
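+# (Illustration: parseInt("010") yields 8 here, while a strict reading of
+# ES5 would yield 10.)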
+
+# This tests precision of trigonometric functions.  We're slightly off
+# from the implementation in libc (~ 1e-17) but it's not clear if we
+# or they are closer to the right answer, or if it even matters.
+S15.8.2.16_A7: PASS || FAIL_OK
+S15.8.2.18_A7: PASS || FAIL_OK
+S15.8.2.13_A23: PASS || FAIL_OK
+
+# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp.
+S15.10.6_A2: FAIL_OK
+
+# We are silent in some regexp cases where the spec wants us to give
+# errors, for compatibility.
+S15.10.2.11_A1_T2: FAIL
+S15.10.2.11_A1_T3: FAIL
+
+# We are more lenient in which string character escapes we allow than
+# the spec (7.8.4 p. 19) wants us to be.  This is for compatibility.
+S7.8.4_A4.3_T3: FAIL_OK
+S7.8.4_A4.3_T4: FAIL_OK
+S7.8.4_A4.3_T5: FAIL_OK
+S7.8.4_A4.3_T6: FAIL_OK
+S7.8.4_A6.1_T4: FAIL_OK
+S7.8.4_A6.2_T1: FAIL_OK
+S7.8.4_A6.2_T2: FAIL_OK
+S7.8.4_A6.4_T1: FAIL_OK
+S7.8.4_A6.4_T2: FAIL_OK
+S7.8.4_A7.1_T4: FAIL_OK
+S7.8.4_A7.2_T1: FAIL_OK
+S7.8.4_A7.2_T2: FAIL_OK
+S7.8.4_A7.2_T3: FAIL_OK
+S7.8.4_A7.2_T4: FAIL_OK
+S7.8.4_A7.2_T5: FAIL_OK
+S7.8.4_A7.2_T6: FAIL_OK
+S7.8.4_A7.4_T1: FAIL_OK
+S7.8.4_A7.4_T2: FAIL_OK
+
+# Sputnik expects unicode escape sequences in RegExp flags to be interpreted.
+# The specification requires them to be passed uninterpreted to the RegExp
+# constructor. We now implement that.
+S7.8.5_A3.1_T7: FAIL_OK
+S7.8.5_A3.1_T8: FAIL_OK
+S7.8.5_A3.1_T9: FAIL_OK
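+# (Illustration: a literal like /(?:)/\u0067 is now a SyntaxError instead
+# of being treated as the flag "g".)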
+
+# We allow some keywords to be used as identifiers.
+S7.5.3_A1.15: FAIL_OK
+S7.5.3_A1.18: FAIL_OK
+S7.5.3_A1.21: FAIL_OK
+S7.5.3_A1.22: FAIL_OK
+S7.5.3_A1.23: FAIL_OK
+S7.5.3_A1.24: FAIL_OK
+S7.5.3_A1.26: FAIL_OK
+
+# This checks for non-262 behavior
+S7.6_D1: PASS || FAIL_OK
+S7.6_D2: PASS || FAIL_OK
+S8.4_D1.1: PASS || FAIL_OK
+S8.4_D2.1: PASS || FAIL_OK
+S8.4_D2.2: PASS || FAIL_OK
+S8.4_D2.3: PASS || FAIL_OK
+S8.4_D2.4: PASS || FAIL_OK
+S8.4_D2.5: PASS || FAIL_OK
+S8.4_D2.6: PASS || FAIL_OK
+S8.4_D2.7: PASS || FAIL_OK
+S11.4.3_D1.2: PASS || FAIL_OK
+S12.6.4_A14_T1: PASS || FAIL_OK
+S12.6.4_D1: PASS || FAIL_OK
+S12.6.4_R1: PASS || FAIL_OK
+S12.6.4_R2: PASS || FAIL_OK
+S13.2_D1.2: PASS || FAIL_OK
+S13_D1_T1: PASS || FAIL_OK
+S14_D4_T3: PASS || FAIL_OK
+S14_D7: PASS || FAIL_OK
+S15.1.2.2_D1.2: PASS || FAIL_OK
+S15.5.2_D2: PASS || FAIL_OK
+S15.5.4.11_D1.1_T1: PASS || FAIL_OK
+S15.5.4.11_D1.1_T2: PASS || FAIL_OK
+S15.5.4.11_D1.1_T3: PASS || FAIL_OK
+S15.5.4.11_D1.1_T4: PASS || FAIL_OK
+
+# We allow function declarations within statements
+S12.6.2_A13_T1: FAIL_OK
+S12.6.2_A13_T2: FAIL_OK
+S12.6.4_A13_T1: FAIL_OK
+S12.6.4_A13_T2: FAIL_OK
+S15.3.4.2_A1_T1: FAIL_OK
+
+# Linux and Mac defaults to extended 80 bit floating point format in the FPU.
+# We follow the other major JS engines by keeping this default.
+S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos
+S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos
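+# (The x87 FPU keeps intermediates at 64-bit significand precision, so some
+# arithmetic can round differently than strict 64-bit IEEE doubles.)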
+
+############################# ES3 TESTS ################################
+# These tests check for ES3 semantics, and differ from ES5.
+# When we follow ES5 semantics, it's ok to fail the test.
+
+# Allow keywords as names of properties in object initialisers and
+# in dot-notation property access.
+S11.1.5_A4.1: FAIL_OK
+S11.1.5_A4.2: FAIL_OK
+
+# Calls builtins without an explicit receiver which means that
+# undefined is passed to the builtin. The tests expect the global
+# object to be passed which was true in ES3 but not in ES5.
+S11.1.1_A2: FAIL_OK
+S15.5.4.4_A1_T3: FAIL_OK
+S15.5.4.5_A1_T3: FAIL_OK
+S15.5.4.6_A1_T3: FAIL_OK
+S15.5.4.7_A1_T3: FAIL_OK
+S15.5.4.8_A1_T3: FAIL_OK
+S15.5.4.9_A1_T3: FAIL_OK
+S15.5.4.10_A1_T3: FAIL_OK
+S15.5.4.11_A1_T3: FAIL_OK
+S15.5.4.12_A1_T3: FAIL_OK
+S15.5.4.13_A1_T3: FAIL_OK
+S15.5.4.14_A1_T3: FAIL_OK
+S15.5.4.15_A1_T3: FAIL_OK
+
+# NaN, Infinity and undefined are read-only according to ES5.
+S15.1.1.1_A2_T1: FAIL_OK  # NaN
+S15.1.1.1_A2_T2: FAIL_OK  # NaN
+S15.1.1.2_A2_T1: FAIL_OK  # Infinity
+# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. Sputnik bug #45.
+S15.1.1.3_A2_T1: FAIL_OK  # undefined
+S15.1.1.3_A2_T2: FAIL_OK  # undefined
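+# (Illustration: "NaN = 42;" silently fails in sloppy mode and throws a
+# TypeError in strict mode under ES5; these tests still expect the ES3
+# writable behavior.)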
+
+# Array.prototype.to[Locale]String is generic in ES5.
+S15.4.4.2_A2_T1: FAIL_OK
+S15.4.4.3_A2_T1: FAIL_OK
+
+######################### UNANALYZED FAILURES ##########################
+
 # Bug? ES5 Attributes - Updating indexed data property 'P' whose attributes are
 #      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
 #      accessor property, 'A' is an Array object (8.12.9 - step 9.b.i)
@@ -522,9 +399,6 @@
 #      [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: true to an
 #      accessor property, 'O' is the global object (8.12.9 - step 9.b.i)
 15.2.3.6-4-360-7: FAIL
-# Bug? ES5 Attributes - [[Value]] attribute of data property is the activex host
-#      object
-15.2.3.6-4-401: FAIL
 # Bug? ES5 Attributes - Failed to add a property to an object when the object's
 #      object has a property with same name and [[Writable]] attribute is set to
 #      false (Number instance)
@@ -541,814 +415,28 @@
 #      prototype has a property with the same name and [[Writable]] set to
 #      false(Function.prototype.bind)
 15.2.3.6-4-420: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.indexOf are correct
-15.2.3.6-4-612: FAIL
-# Bug? ES5 Attributes - all attributes in Object.lastIndexOf are correct
-15.2.3.6-4-613: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.every are correct
-15.2.3.6-4-614: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.some are correct
-15.2.3.6-4-615: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.forEach are correct
-15.2.3.6-4-616: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.map are correct
-15.2.3.6-4-617: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.filter are correct
-15.2.3.6-4-618: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.reduce are correct
-15.2.3.6-4-619: FAIL
-# Bug? ES5 Attributes - all attributes in Array.prototype.reduceRight are
-#      correct
-15.2.3.6-4-620: FAIL
-# Bug? ES5 Attributes - all attributes in String.prototype.trim are correct
-15.2.3.6-4-621: FAIL
-# Bug? ES5 Attributes - all attributes in Date.prototype.toISOString are correct
-15.2.3.6-4-623: FAIL
-# Bug? ES5 Attributes - all attributes in Date.prototype.toJSON are correct
-15.2.3.6-4-624: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, test the length property of
-#      'O' is own data property (15.4.5.1 step 1)
-15.2.3.7-6-a-112: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, test the length property of
-#      'O' is own data property that overrides an inherited data property
-#      (15.4.5.1 step 1)
-15.2.3.7-6-a-113: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test RangeError is thrown when setting the [[Value]] field of 'desc'
-#      to undefined (15.4.5.1 step 3.c)
-15.2.3.7-6-a-121: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test setting the [[Value]] field of 'desc' to null actuall is set to
-#      0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-122: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a boolean with value false
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-123: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a boolean with value true
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-124: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is 0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-125: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is +0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-126: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is -0 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-127: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is positive number (15.4.5.1
-#      step 3.c)
-15.2.3.7-6-a-128: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is negative number (15.4.5.1
-#      step 3.c)
-15.2.3.7-6-a-129: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is +Infinity (15.4.5.1 step
-#      3.c)
-15.2.3.7-6-a-130: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is -Infinity (15.4.5.1 step
-#      3.c)
-15.2.3.7-6-a-131: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is NaN (15.4.5.1 step 3.c)
-15.2.3.7-6-a-132: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      positive number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-133: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      negative number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-134: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      decimal number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-135: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing
-#      +Infinity (15.4.5.1 step 3.c)
-15.2.3.7-6-a-136: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing
-#      -Infinity (15.4.5.1 step 3.c)
-15.2.3.7-6-a-137: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing an
-#      exponential number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-138: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a hex
-#      number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-139: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is a string containing a
-#      leading zero number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-140: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' is a string which doesn't convert
-#      to a number (15.4.5.1 step 3.c)
-15.2.3.7-6-a-141: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      toString method (15.4.5.1 step 3.c)
-15.2.3.7-6-a-142: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method (15.4.5.1 step 3.c)
-15.2.3.7-6-a-143: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      valueOf method that returns an object and toString method that returns a
-#      string (15.4.5.1 step 3.c)
-15.2.3.7-6-a-144: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is an Object which has an own
-#      toString and valueOf method (15.4.5.1 step 3.c)
-15.2.3.7-6-a-145: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test TypeError is thrown when the [[Value]] field of 'desc' is an
-#      Object whose toString and valueOf both fail to return a primitive value
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-146: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test using inherited valueOf method when the [[Value]] field of
-#      'desc' is an Object with an own toString and inherited valueOf methods
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-147: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      positive non-integer values (15.4.5.1 step 3.c)
-15.2.3.7-6-a-148: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      negative non-integer values (15.4.5.1 step 3.c)
-15.2.3.7-6-a-149: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 2
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-150: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test the [[Value]] field of 'desc' is boundary value 2^32 - 1
-#      (15.4.5.1 step 3.c)
-15.2.3.7-6-a-151: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-152: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'name' is the length property
-#      of 'O', test RangeError is thrown when the [[Value]] field of 'desc' is
-#      boundary value 2^32 + 1 (15.4.5.1 step 3.c)
-15.2.3.7-6-a-153: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' which is greater than value of
-#      the length property is defined into 'O' without deleting any property
-#      with large index named (15.4.5.1 step 3.f)
-15.2.3.7-6-a-155: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', test the [[Value]] field of 'desc' which is less than value of the
-#      length property is defined into 'O' with deleting properties with large
-#      index named (15.4.5.1 step 3.f)
-15.2.3.7-6-a-157: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true at last after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h)
-15.2.3.7-6-a-161: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to true at last after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is true (15.4.5.1 step 3.h)
-15.2.3.7-6-a-162: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last after deleting properties with large index named if the
-#      [[Writable]] field of 'desc' is false (15.4.5.1 step 3.i.ii)
-15.2.3.7-6-a-163: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property in 'O'
-#      is set as true before deleting properties with large index named
-#      (15.4.5.1 step 3.i.iii)
-15.2.3.7-6-a-164: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the length property is decreased by 1 (15.4.5.1 step
-#      3.l.i)
-15.2.3.7-6-a-165: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' can stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-166: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of inherited data property
-#      with large index named in 'O' can't stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-167: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides inherited data property can stop
-#      deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-168: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own data property with
-#      large index named in 'O' that overrides inherited accessor property can
-#      stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-169: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' can stop deleting index named properties
-#      (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-170: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of inherited accessor
-#      property with large index named in 'O' can't stop deleting index named
-#      properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-171: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides inherited data property can
-#      stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-172: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Configurable]] attribute of own accessor property
-#      with large index named in 'O' that overrides inherited accessor property
-#      can stop deleting index named properties (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-173: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the configurable large index named property of 'O' can be
-#      deleted (15.4.5.1 step 3.l.ii)
-15.2.3.7-6-a-174: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test value of the length property is set to the last
-#      non-configurable index named property of 'O' plus 1 (15.4.5.1 step
-#      3.l.iii.1)
-15.2.3.7-6-a-175: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last when the [[Writable]] field of 'desc' is false and 'O'
-#      contains non-configurable large index named property (15.4.5.1 step
-#      3.l.iii.2)
-15.2.3.7-6-a-176: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is the length property of
-#      'O', the [[Value]] field of 'desc' is less than value of the length
-#      property, test the [[Writable]] attribute of the length property is set
-#      to false at last when the [[Writable]] field of 'desc' is false and 'O'
-#      doesn't contain non-configurable large index named property (15.4.5.1
-#      step 3.m)
-15.2.3.7-6-a-177: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, 'P' is boundary value 2^32 - 2 (15.4.5.1 step 4.a)
-15.2.3.7-6-a-179: FAIL
-# Bug? Object.defineProperties - TypeError is thrown if 'O' is an Array, 'P' is
-#      an array index named property, [[Writable]] attribute of the length
-#      property in 'O' is false, value of 'P' is equal to value of the length
-#      property in 'O' (15.4.5.1 step 4.b)
-15.2.3.7-6-a-184: FAIL
-# Bug? Object.defineProperties - TypeError is thrown if 'O' is an Array, 'P' is
-#      an array index named property, [[Writable]] attribute of the length
-#      property in 'O' is false, value of 'P' is bigger than value of the length
-#      property in 'O' (15.4.5.1 step 4.b)
-15.2.3.7-6-a-185: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, 'desc' is accessor descriptor, test updating all attribute
-#      values of 'P' (15.4.5.1 step 4.c)
-15.2.3.7-6-a-205: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property that already exists on 'O' is accessor property and 'desc' is
-#      accessor descriptor, test updating the [[Enumerable]] attribute value of
-#      'P' (15.4.5.1 step 4.c)
-15.2.3.7-6-a-260: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property that already exists on 'O' is accessor property and 'desc' is
-#      accessor descriptor, test updating the [[Configurable]] attribute value
-#      of 'P' (15.4.5.1 step 4.c)
-15.2.3.7-6-a-261: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property that already exists on 'O' is accessor property and 'desc' is
-#      accessor descriptor, test updating multiple attribute values of 'P'
-#      (15.4.5.1 step 4.c)
-15.2.3.7-6-a-262: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('P') + 1 if
-#      ToUint32('P') equals to value of the length property in 'O' (15.4.5.1
-#      step 4.e.ii)
-15.2.3.7-6-a-264: FAIL
-# Bug? Object.defineProperties - 'O' is an Array, 'P' is an array index named
-#      property, test the length property of 'O' is set as ToUint32('P') + 1 if
-#      ToUint32('P') is greater than value of the length property in 'O'
-#      (15.4.5.1 step 4.e.ii)
-15.2.3.7-6-a-265: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own accessor
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', and
-#      'desc' is accessor descriptor, test updating multiple attribute values of
-#      'P' (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.7-6-a-280: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', and
-#      'desc' is data descriptor, test updating multiple attribute values of 'P'
-#      (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.7-6-a-281: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Value]] attribute value of 'P'
-#      whose writable and configurable attributes are false (10.6
-#      [[DefineOwnProperty]] step 4)
-15.2.3.7-6-a-282: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Writable]] attribute value of 'P'
-#      which is defined as non-configurable (10.6 [[DefineOwnProperty]] step 4)
-15.2.3.7-6-a-283: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Enumerable]] attribute value of
-#      'P' which is defined as non-configurable (10.6 [[DefineOwnProperty]] step
-#      4)
-15.2.3.7-6-a-284: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is own data
-#      property of 'O' which is also defined in [[ParameterMap]] of 'O', test
-#      TypeError is thrown when updating the [[Configurable]] attribute value of
-#      'P' which is defined as non-configurable (10.6 [[DefineOwnProperty]] step
-#      4)
-15.2.3.7-6-a-285: FAIL
-# Bug? Object.defineProperties - 'O' is an Arguments object, 'P' is an array
-#      index named accessor property of 'O' but not defined in [[ParameterMap]]
-#      of 'O', and 'desc' is accessor descriptor, test updating multiple
-#      attribute values of 'P' (10.6 [[DefineOwnProperty]] step 3)
-15.2.3.7-6-a-292: FAIL
-# Bug? Strict Mode - 'this' value is a string which cannot be converted to
-#      wrapper objects when the function is called with an array of arguments
-15.3.4.3-1-s: FAIL
-# Bug? Strict Mode - 'this' value is a number which cannot be converted to
-#      wrapper objects when the function is called with an array of arguments
-15.3.4.3-2-s: FAIL
-# Bug? Strict Mode - 'this' value is a boolean which cannot be converted to
-#      wrapper objects when the function is called with an array of arguments
-15.3.4.3-3-s: FAIL
-# Bug? Function.prototype.bind - [[Get]] attribute of 'caller' property in 'F'
-#      is thrower
-15.3.4.5-20-2: FAIL
-# Bug? Function.prototype.bind - [[Set]] attribute of 'caller' property in 'F'
-#      is thrower
-15.3.4.5-20-3: FAIL
-# Bug? Function.prototype.bind - [[Get]] attribute of 'arguments' property in
-#      'F' is thrower
-15.3.4.5-21-2: FAIL
-# Bug? Function.prototype.bind - [[Set]] attribute of 'arguments' property in
-#      'F' is thrower
-15.3.4.5-21-3: FAIL
-# Bug? Array.prototype.indexOf - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.14-9-a-19: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.14-9-b-i-11: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property that overrides an inherited accessor property on an Array
-15.4.4.14-9-b-i-13: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.14-9-b-i-17: FAIL
-# Bug? Array.prototype.indexOf - element to be retrieved is own accessor
-#      property without a get function that overrides an inherited accessor
-#      property on an Array
-15.4.4.14-9-b-i-19: FAIL
-# Bug? Array.prototype.indexOf - side-effects are visible in subsequent
-#      iterations on an Array
-15.4.4.14-9-b-i-28: FAIL
-# Bug? Array.prototype.indexOf - terminates iteration on unhandled exception on
-#      an Array
-15.4.4.14-9-b-i-30: FAIL
-# Bug? Array.prototype.lastIndexOf - deleting property of prototype causes
-#      prototype index property not to be visited on an Array
-15.4.4.15-8-a-14: FAIL
-# Bug? Array.prototype.lastIndexOf - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.15-8-a-19: FAIL
-# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.15-8-b-i-11: FAIL
-# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
-#      property that overrides an inherited accessor property on an Array
-15.4.4.15-8-b-i-13: FAIL
-# Bug? Array.prototype.lastIndexOf - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.15-8-b-i-17: FAIL
-# Bug? Array.prototype.lastIndexOf - side-effects are visible in subsequent
-#      iterations on an Array
-15.4.4.15-8-b-i-28: FAIL
-# Bug? Array.prototype.lastIndexOf terminates iteration on unhandled exception
-#      on an Array
-15.4.4.15-8-b-i-30: FAIL
-# Bug? Array.prototype.every applied to boolean primitive
-15.4.4.16-1-3: FAIL
-# Bug? Array.prototype.every applied to number primitive
-15.4.4.16-1-5: FAIL
-# Bug? Array.prototype.every applied to string primitive
-15.4.4.16-1-7: FAIL
-# Bug? Array.prototype.every - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.16-4-8: FAIL
-# Bug? Array.prototype.every - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.16-4-9: FAIL
-# Bug? Array.prototype.every - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.16-4-10: FAIL
-# Bug? Array.prototype.every - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.16-4-11: FAIL
-# Bug? Array.prototype.every - calling with no callbackfn is the same as passing
-#      undefined for callbackfn
-15.4.4.16-4-15: FAIL
-# Bug? Array.prototype.every - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.16-7-b-16: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      on an Array
-15.4.4.16-7-c-i-10: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.16-7-c-i-12: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.16-7-c-i-14: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.16-7-c-i-18: FAIL
-# Bug? Array.prototype.every - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.16-7-c-i-20: FAIL
-# Bug? Array.prototype.every - element changed by getter on previous iterations
-#      is observed on an Array
-15.4.4.16-7-c-i-28: FAIL
-# Bug? Array.prototype.some applied to boolean primitive
-15.4.4.17-1-3: FAIL
-# Bug? Array.prototype.some applied to number primitive
-15.4.4.17-1-5: FAIL
-# Bug? Array.prototype.some applied to string primitive
-15.4.4.17-1-7: FAIL
-# Bug? Array.prototype.some - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.17-4-8: FAIL
-# Bug? Array.prototype.some - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.17-4-9: FAIL
-# Bug? Array.prototype.some - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.17-4-10: FAIL
-# Bug? Array.prototype.some - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.17-4-11: FAIL
-# Bug? Array.prototype.some - calling with no callbackfn is the same as passing
-#      undefined for callbackfn
-15.4.4.17-4-15: FAIL
-# Bug? Array.prototype.some - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.17-7-b-16: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      on an Array
-15.4.4.17-7-c-i-10: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.17-7-c-i-12: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.17-7-c-i-14: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.17-7-c-i-18: FAIL
-# Bug? Array.prototype.some - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.17-7-c-i-20: FAIL
-# Bug? Array.prototype.some - element changed by getter on previous iterations
-#      is observed on an Array
-15.4.4.17-7-c-i-28: FAIL
-# Bug? Array.prototype.forEach applied to boolean primitive
-15.4.4.18-1-3: FAIL
-# Bug? Array.prototype.forEach applied to number primitive
-15.4.4.18-1-5: FAIL
-# Bug? Array.prototype.forEach applied to string primitive
-15.4.4.18-1-7: FAIL
-# Bug? Array.prototype.forEach - side effects produced by step 2 are visible
-#      when an exception occurs
-15.4.4.18-4-8: FAIL
-# Bug? Array.prototype.forEach - side effects produced by step 3 are visible
-#      when an exception occurs
-15.4.4.18-4-9: FAIL
-# Bug? Array.prototype.forEach - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.18-4-10: FAIL
-# Bug? Array.prototype.forEach - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.18-4-11: FAIL
-# Bug? Array.prototype.forEach - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.18-4-15: FAIL
-# Bug? Array.prototype.forEach - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.18-7-b-16: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property on an Array
-15.4.4.18-7-c-i-10: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.18-7-c-i-12: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property that overrides an inherited accessor property on an Array
-15.4.4.18-7-c-i-14: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.18-7-c-i-18: FAIL
-# Bug? Array.prototype.forEach - element to be retrieved is own accessor
-#      property without a get function that overrides an inherited accessor
-#      property on an Array
-15.4.4.18-7-c-i-20: FAIL
-# Bug? Array.prototype.forEach - element changed by getter on previous
-#      iterations is observed on an Array
-15.4.4.18-7-c-i-28: FAIL
-# Bug? Array.prototype.map - applied to boolean primitive
-15.4.4.19-1-3: FAIL
-# Bug? Array.prototype.map - applied to number primitive
-15.4.4.19-1-5: FAIL
-# Bug? Array.prototype.map - applied to string primitive
-15.4.4.19-1-7: FAIL
-# Bug? Array.prototype.map - Side effects produced by step 2 are visible when an
-#      exception occurs
-15.4.4.19-4-8: FAIL
-# Bug? Array.prototype.map - Side effects produced by step 3 are visible when an
-#      exception occurs
-15.4.4.19-4-9: FAIL
-# Bug? Array.prototype.map - the exception is not thrown if exception was thrown
-#      by step 2
-15.4.4.19-4-10: FAIL
-# Bug? Array.prototype.map - the exception is not thrown if exception was thrown
-#      by step 3
-15.4.4.19-4-11: FAIL
-# Bug? Array.prototype.map - calling with no callbackfn is the same as passing
-#      undefined for callbackfn
-15.4.4.19-4-15: FAIL
-# Bug? Array.prototype.map - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.19-8-b-16: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property on
-#      an Array
-15.4.4.19-8-c-i-10: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.19-8-c-i-12: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.19-8-c-i-14: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.19-8-c-i-18: FAIL
-# Bug? Array.prototype.map - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.19-8-c-i-19: FAIL
-# Bug? Array.prototype.map - element changed by getter on previous iterations is
-#      observed on an Array
-15.4.4.19-8-c-i-28: FAIL
-# Bug? Array.prototype.filter applied to boolean primitive
-15.4.4.20-1-3: FAIL
-# Bug? Array.prototype.filter applied to number primitive
-15.4.4.20-1-5: FAIL
-# Bug? Array.prototype.filter applied to string primitive
-15.4.4.20-1-7: FAIL
-# Bug? Array.prototype.filter - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.20-4-8: FAIL
-# Bug? Array.prototype.filter - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.20-4-9: FAIL
-# Bug? Array.prototype.filter - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.20-4-10: FAIL
-# Bug? Array.prototype.filter - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.20-4-11: FAIL
-# Bug? Array.prototype.filter - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.20-4-15: FAIL
-# Bug? Array.prototype.filter - properties can be added to prototype after
-#      current position are visited on an Array-like object
-15.4.4.20-9-b-6: FAIL
-# Bug? Array.prototype.filter - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.20-9-b-16: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own data property
-#      that overrides an inherited accessor property on an Array
-15.4.4.20-9-c-i-6: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      on an Array
-15.4.4.20-9-c-i-10: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      that overrides an inherited data property on an Array
-15.4.4.20-9-c-i-12: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      that overrides an inherited accessor property on an Array
-15.4.4.20-9-c-i-14: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is inherited accessor
-#      property on an Array
-15.4.4.20-9-c-i-16: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      without a get function on an Array
-15.4.4.20-9-c-i-18: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is own accessor property
-#      without a get function that overrides an inherited accessor property on
-#      an Array
-15.4.4.20-9-c-i-20: FAIL
-# Bug? Array.prototype.filter - element to be retrieved is inherited accessor
-#      property without a get function on an Array
-15.4.4.20-9-c-i-22: FAIL
-# Bug? Array.prototype.filter - element changed by getter on previous iterations
-#      is observed on an Array
-15.4.4.20-9-c-i-28: FAIL
-# Bug? Array.prototype.reduce applied to boolean primitive
-15.4.4.21-1-3: FAIL
-# Bug? Array.prototype.reduce applied to number primitive
-15.4.4.21-1-5: FAIL
-# Bug? Array.prototype.reduce applied to string primitive
-15.4.4.21-1-7: FAIL
-# Bug? Array.prototype.reduce - side effects produced by step 2 are visible when
-#      an exception occurs
-15.4.4.21-4-8: FAIL
-# Bug? Array.prototype.reduce - side effects produced by step 3 are visible when
-#      an exception occurs
-15.4.4.21-4-9: FAIL
-# Bug? Array.prototype.reduce - the exception is not thrown if exception was
-#      thrown by step 2
-15.4.4.21-4-10: FAIL
-# Bug? Array.prototype.reduce - the exception is not thrown if exception was
-#      thrown by step 3
-15.4.4.21-4-11: FAIL
-# Bug? Array.prototype.reduce - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.21-4-15: FAIL
-# Bug? Array.prototype.reduce - decreasing length of array in step 8 does not
-#      delete non-configurable properties
-15.4.4.21-9-b-16: FAIL
-# Bug? Array.prototype.reduce - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.21-9-b-29: FAIL
-# Bug? Array.prototype.reduceRight applied to boolean primitive
-15.4.4.22-1-3: FAIL
-# Bug? Array.prototype.reduceRight applied to number primitive
-15.4.4.22-1-5: FAIL
-# Bug? Array.prototype.reduceRight applied to string primitive
-15.4.4.22-1-7: FAIL
-# Bug? Array.prototype.reduceRight - side effects produced by step 2 are visible
-#      when an exception occurs
-15.4.4.22-4-8: FAIL
-# Bug? Array.prototype.reduceRight - side effects produced by step 3 are visible
-#      when an exception occurs
-15.4.4.22-4-9: FAIL
-# Bug? Array.prototype.reduceRight - the exception is not thrown if exception
-#      was thrown by step 2
-15.4.4.22-4-10: FAIL
-# Bug? Array.prototype.reduceRight - the exception is not thrown if exception
-#      was thrown by step 3
-15.4.4.22-4-11: FAIL
-# Bug? Array.prototype.reduceRight - calling with no callbackfn is the same as
-#      passing undefined for callbackfn
-15.4.4.22-4-15: FAIL
-# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
-#      property that overrides an inherited data property on an Array
-15.4.4.22-8-b-iii-1-12: FAIL
-# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
-#      property without a get function on an Array
-15.4.4.22-8-b-iii-1-18: FAIL
-# Bug? Array.prototype.reduceRight - element to be retrieved is own accessor
-#      property without a get function that overrides an inherited accessor
-#      property on an Array
-15.4.4.22-8-b-iii-1-20: FAIL
-# Bug? Array.prototype.reduceRight - element changed by getter on current
-#      iteration is observed in subsequent iterations on an Array
-15.4.4.22-8-b-iii-1-30: FAIL
-# Bug? Array.prototype.reduceRight - Exception in getter terminates iteration on
-#      an Array
-15.4.4.22-8-b-iii-1-33: FAIL
-# Bug? Array.prototype.reduceRight - modifications to length don't change number
-#      of iterations in step 9
-15.4.4.22-8-b-2: FAIL
-# Bug? Array.prototype.reduceRight - deleting own property in step 8 causes
-#      deleted index property not to be visited on an Array
-15.4.4.22-9-b-9: FAIL
-# Bug? Array.prototype.reduceRight - deleting own property with prototype
-#      property in step 8 causes prototype index property to be visited on an
-#      Array
-15.4.4.22-9-b-13: FAIL
-# Bug? Array.prototype.reduceRight - decreasing length of array in step 8 does
-#      not delete non-configurable properties
-15.4.4.22-9-b-16: FAIL
-# Bug? Array.prototype.reduceRight - deleting property of prototype causes
-#      deleted index property not to be visited on an Array
-15.4.4.22-9-b-24: FAIL
-# Bug? Array.prototype.reduceRight - deleting own property with prototype
-#      property causes prototype index property to be visited on an Array
-15.4.4.22-9-b-26: FAIL
-# Bug? Array.prototype.reduceRight - decreasing length of array does not delete
-#      non-configurable properties
-15.4.4.22-9-b-29: FAIL
-# Bug? Array.prototype.reduceRight - element changed by getter on previous
-#      iterations is observed on an Array
-15.4.4.22-9-c-i-30: FAIL
-# Bug? Array.prototype.reduceRight - modifications to length will change number
-#      of iterations
-15.4.4.22-9-9: FAIL
-# Bug? String.prototype.trim - 'S' is a string with all WhiteSpace
-15.5.4.20-3-2: FAIL
-# Bug? String.prototype.trim - 'S' is a string with all union of WhiteSpace and
-#      LineTerminator
-15.5.4.20-3-3: FAIL
-# Bug? String.prototype.trim - 'S' is a string starting with the union of all
-#      LineTerminator and all WhiteSpace
-15.5.4.20-3-4: FAIL
-# Bug? String.prototype.trim - 'S' is a string ending with the union of all
-#      LineTerminator and all WhiteSpace
-15.5.4.20-3-5: FAIL
-# Bug? String.prototype.trim - 'S' is a string starting with the union of all
-#      LineTerminator and all WhiteSpace and ending with the union of all
-#      LineTerminator and all WhiteSpace
-15.5.4.20-3-6: FAIL
-# Bug? String.prototype.trim handles whitespace and line terminators (\\uFEFFabc)
-15.5.4.20-4-10: FAIL
-# Bug? String.prototype.trim handles whitespace and line terminators (abc\\uFEFF)
-15.5.4.20-4-18: FAIL
-# Bug? String.prototype.trim handles whitespace and line terminators
-#      (\\uFEFF\\uFEFF)
-15.5.4.20-4-34: FAIL
-# Bug? Date.prototype.toISOString - RangeError is thrown when value of date is
-#      Date(1970, 0, -99999999, 0, 0, 0, -1), the time zone is UTC(0)
-15.9.5.43-0-8: FAIL
-# Bug? Date.prototype.toISOString - RangeError is not thrown when value of date
-#      is Date(1970, 0, 100000001, 0, 0, 0, -1), the time zone is UTC(0)
-15.9.5.43-0-11: FAIL
-# Bug? Date.prototype.toISOString - RangeError is not thrown when value of date
-#      is Date(1970, 0, 100000001, 0, 0, 0, 0), the time zone is UTC(0)
-15.9.5.43-0-12: FAIL
-# Bug? Date.prototype.toISOString - RangeError is thrown when value of date is
-#      Date(1970, 0, 100000001, 0, 0, 0, 1), the time zone is UTC(0)
-15.9.5.43-0-13: FAIL
-# Bug? Date.prototype.toISOString - when value of year is -Infinity,
-#      Date.prototype.toISOString throws a RangeError
-15.9.5.43-0-14: FAIL
-# Bug? Date.prototype.toISOString - when value of year is Infinity,
-#      Date.prototype.toISOString throws a RangeError
-15.9.5.43-0-15: FAIL
-# Bug? RegExp - the thrown error is SyntaxError instead of RegExpError when 'F'
-#      contains any character other than 'g', 'i', or 'm'
-15.10.4.1-3: FAIL
-# Bug? RegExp.prototype is itself a RegExp
-15.10.6: FAIL
-# Bug? RegExp.prototype.source is of type String
-15.10.7.1-1: FAIL
-# Bug? RegExp.prototype.source is a data property with default attribute values
-#      (false)
-15.10.7.1-2: FAIL
-# Bug? RegExp.prototype.global is of type Boolean
-15.10.7.2-1: FAIL
-# Bug? RegExp.prototype.global is a data property with default attribute values
-#      (false)
-15.10.7.2-2: FAIL
-# Bug? RegExp.prototype.ignoreCase is of type Boolean
-15.10.7.3-1: FAIL
-# Bug? RegExp.prototype.ignoreCase is a data property with default attribute
-#      values (false)
-15.10.7.3-2: FAIL
-# Bug? RegExp.prototype.multiline is of type Boolean
-15.10.7.4-1: FAIL
-# Bug? RegExp.prototype.multiline is a data property with default attribute
-#      values (false)
-15.10.7.4-2: FAIL
-# Bug? RegExp.prototype.lastIndex is of type Number
-15.10.7.5-1: FAIL
-# Bug? RegExp.prototype.lastIndex is a data property with specified attribute
-#      values
-15.10.7.5-2: FAIL
-# Bug? Error.prototype.toString returns the value of 'msg' when 'name' is empty
-#      string and 'msg' isn't undefined
-15.11.4.4-8-1: FAIL
+
+############################ SKIPPED TESTS #############################
+
+# These tests take a looong time to run in debug mode.
+S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
+S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
+
+[ $arch == arm ]
+
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
+
+[ $arch == mips ]
+
+# Skip all tests on MIPS.
+*: SKIP
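
The sections above use the shared V8 test-status syntax: rules before the first bracketed header apply everywhere, rules under a header such as [ $arch == arm ] apply only when the condition holds, and an outcome may carry its own guard, as in "PASS, SKIP if $mode == debug". A minimal sketch of parsing one such rule line (parse_rule is a hypothetical helper, not V8's actual parser):

    # Hypothetical sketch: split a status line such as
    # "S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug" into its parts.
    def parse_rule(line):
        name, _, rhs = line.partition(':')
        outcomes = []
        for part in rhs.split(','):
            # An outcome may carry a guard, e.g. "SKIP if $mode == debug".
            outcome, _, guard = part.strip().partition(' if ')
            outcomes.append((outcome, guard or None))
        return name.strip(), outcomes

    print(parse_rule('S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug'))
    # ('S15.1.3.2_A2.5_T1', [('PASS', None), ('SKIP', '$mode == debug')])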
diff --git a/test/test262/testcfg.py b/test/test262/testcfg.py
index 9482046..52127cd 100644
--- a/test/test262/testcfg.py
+++ b/test/test262/testcfg.py
@@ -43,10 +43,10 @@
     self.root = root
 
   def IsNegative(self):
-    return self.filename.endswith('-n.js')
+    return '@negative' in self.GetSource()
 
   def GetLabel(self):
-    return "%s test262 %s %s" % (self.mode, self.GetGroup(), self.GetName())
+    return "%s test262 %s" % (self.mode, self.GetName())
 
   def IsFailureOutput(self, output):
     if output.exit_code != 0:
@@ -63,9 +63,6 @@
   def GetName(self):
     return self.path[-1]
 
-  def GetGroup(self):
-    return self.path[0]
-
   def GetSource(self):
     return open(self.filename).read()
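
IsNegative() above switches from the old filename convention (tests ending in -n.js) to test262's own @negative annotation: a test is expected to fail exactly when its source carries that tag, so the converted suite no longer has to encode the expectation in the filename. Restated as a standalone check (is_negative_test is an illustrative name, assuming a readable test file):

    # A test262 test is negative when its source contains "@negative".
    def is_negative_test(filename):
        with open(filename) as f:
            return '@negative' in f.read()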
 
@@ -75,13 +72,14 @@
   def __init__(self, context, root):
     super(Test262TestConfiguration, self).__init__(context, root)
 
-  def AddIETestCenter(self, tests, current_path, path, mode):
-    current_root = join(self.root, 'data', 'test', 'suite', 'ietestcenter')
+  def ListTests(self, current_path, path, mode, variant_flags):
+    testroot = join(self.root, 'data', 'test', 'suite')
     harness = [join(self.root, 'data', 'test', 'harness', f)
                    for f in TEST_262_HARNESS]
     harness += [join(self.root, 'harness-adapt.js')]
-    for root, dirs, files in os.walk(current_root):
-      for dotted in [x  for x in dirs if x.startswith('.')]:
+    tests = []
+    for root, dirs, files in os.walk(testroot):
+      for dotted in [x for x in dirs if x.startswith('.')]:
         dirs.remove(dotted)
       dirs.sort()
       root_path = root[len(self.root):].split(os.path.sep)
@@ -89,25 +87,11 @@
       files.sort()
       for file in files:
         if file.endswith('.js'):
-          if self.Contains(path, root_path):
-            test_path = ['ietestcenter', file[:-3]]
+          test_path = ['test262', file[:-3]]
+          if self.Contains(path, test_path):
             test = Test262TestCase(join(root, file), test_path, self.context,
                                    self.root, mode, harness)
             tests.append(test)
-
-  def AddSputnikConvertedTests(self, tests, current_path, path, mode):
-    # To be enabled
-    pass
-
-  def AddSputnikTests(self, tests, current_path, path, mode):
-    # To be enabled
-    pass
-
-  def ListTests(self, current_path, path, mode, variant_flags):
-    tests = []
-    self.AddIETestCenter(tests, current_path, path, mode)
-    self.AddSputnikConvertedTests(tests, current_path, path, mode)
-    self.AddSputnikTests(tests, current_path, path, mode)
     return tests
 
   def GetBuildRequirements(self):
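
ListTests() now walks the unified data/test/suite tree in one pass instead of dispatching to per-origin helpers (AddIETestCenter plus the never-enabled sputnik stubs), labels every test under a single test262 group, and filters with Contains(path, test_path) against the test's own path rather than the directory being walked. The dot-directory pruning it relies on is a standard os.walk idiom, sketched here in isolation:

    import os

    # Removing entries from "dirs" in place stops os.walk from
    # descending into hidden directories such as .svn.
    for root, dirs, files in os.walk('data/test/suite'):
        for dotted in [d for d in dirs if d.startswith('.')]:
            dirs.remove(dotted)  # must mutate the list os.walk yielded
        dirs.sort()
        # ... collect the *.js files, as ListTests() does above ...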
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
index 511ab2b..de3dc90 100755
--- a/tools/gc-nvp-trace-processor.py
+++ b/tools/gc-nvp-trace-processor.py
@@ -226,6 +226,10 @@
     return r['pause'] - r['external']
   return 0
 
+
+def real_mutator(r):
+  return r['mutator'] - r['stepstook']
+
 plots = [
   [
     Set('style fill solid 0.5 noborder'),
@@ -236,7 +240,24 @@
          Item('Sweep', 'sweep', lc = 'blue'),
          Item('Compaction', 'compact', lc = 'red'),
          Item('External', 'external', lc = '#489D43'),
-         Item('Other', other_scope, lc = 'grey'))
+         Item('Other', other_scope, lc = 'grey'),
+         Item('IGC Steps', 'stepstook', lc = '#FF6347'))
+  ],
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
+         Item('Marking', 'mark', lc = 'purple'),
+         Item('Sweep', 'sweep', lc = 'blue'),
+         Item('Compaction', 'compact', lc = 'red'),
+         Item('External', 'external', lc = '#489D43'),
+         Item('Other', other_scope, lc = '#ADD8E6'),
+         Item('External', 'external', lc = '#D3D3D3'))
+  ],
+
+  [
+    Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
   ],
   [
     Set('style histogram rowstacked'),
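
real_mutator() subtracts the time spent in incremental-marking steps (the stepstook field) from the reported mutator time, and the new third plot graphs exactly that: time the mutator genuinely had to itself. On a single trace record (field names from the trace format above, values invented):

    # Illustration only: with 12.0 ms of nominal mutator time and 3.5 ms
    # consumed by IGC steps, 8.5 ms were truly available to JavaScript.
    r = {'mutator': 12.0, 'stepstook': 3.5}
    real = r['mutator'] - r['stepstook']  # 8.5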
@@ -275,7 +296,7 @@
   return reduce(lambda t,r: f(t, r[field]), trace, init)
 
 def calc_total(trace, field):
-  return freduce(lambda t,v: t + v, field, trace, 0)
+  return freduce(lambda t,v: t + long(v), field, trace, long(0))
 
 def calc_max(trace, field):
   return freduce(lambda t,r: max(t, r), field, trace, 0)
@@ -290,6 +311,8 @@
   marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
   markcompacts = filter(lambda r: r['gc'] == 'mc', trace)
   scavenges = filter(lambda r: r['gc'] == 's', trace)
+  globalgcs = filter(lambda r: r['gc'] != 's', trace)
+
 
   charts = plot_all(plots, trace, filename)
 
@@ -302,7 +325,7 @@
     else:
       avg = 0
     if n > 1:
-      dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
+      dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
                       (n - 1))
     else:
       dev = 0
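
This small change fixes a real bug: the old lambda (r - avg) ** 2 ignored the accumulator t, so freduce returned only the squared deviation of the last record, whereas the corrected t + (r - avg) ** 2 sums all of them, yielding the usual Bessel-corrected sample deviation. Equivalent standalone computation (sample_stddev is an illustrative helper):

    import math

    def sample_stddev(values):
        n = len(values)
        avg = sum(values) / float(n)
        if n > 1:
            return math.sqrt(sum((v - avg) ** 2 for v in values) / (n - 1))
        return 0

    print(sample_stddev([1.0, 2.0, 3.0]))  # 1.0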
@@ -311,6 +334,31 @@
               '<td>%d</td><td>%d [dev %f]</td></tr>' %
               (prefix, n, total, max, avg, dev))
 
+  def HumanReadable(size):
+    suffixes = ['bytes', 'kB', 'MB', 'GB']
+    power = 1
+    for i in range(len(suffixes)):
+      if size < power*1024:
+        return "%.1f" % (float(size) / power) + " " + suffixes[i]
+      power *= 1024
+
+  def throughput(name, trace):
+    total_live_after = calc_total(trace, 'total_size_after')
+    total_live_before = calc_total(trace, 'total_size_before')
+    total_gc = calc_total(trace, 'pause')
+    if total_gc == 0:
+      return
+    out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_after),
+               total_gc,
+               HumanReadable(total_live_after / total_gc)))
+    out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_before),
+               total_gc,
+               HumanReadable(total_live_before / total_gc)))
+
 
   with open(filename + '.html', 'w') as out:
     out.write('<html><body>')
@@ -329,6 +377,11 @@
           filter(lambda r: r['external'] != 0, trace),
           'external')
     out.write('</table>')
+    throughput('TOTAL', trace)
+    throughput('MS', marksweeps)
+    throughput('MC', markcompacts)
+    throughput('OLDSPACE', globalgcs)
+    out.write('<br/>')
     for chart in charts:
       out.write('<img src="%s">' % chart)
       out.write('</body></html>')
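
The new throughput report divides total live bytes (before and after collection) by total GC pause time, per GC kind. Two hedged observations: under Python 2 the division of two longs floors the result, and HumanReadable() falls off the end of its suffix list (returning None) for sizes of 1024 GB and up; neither matters for typical traces. The arithmetic being printed, with invented numbers:

    # 512 MB surviving across all GCs, 2048 ms of total pause time:
    total_live_after = 512 * 1024 * 1024
    total_gc = 2048
    print('%d bytes/ms' % (total_live_after / total_gc))  # 262144 bytes/ms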
diff --git a/tools/gcmole/gccause.lua b/tools/gcmole/gccause.lua
index a6fe542..b989176 100644
--- a/tools/gcmole/gccause.lua
+++ b/tools/gcmole/gccause.lua
@@ -48,6 +48,8 @@
 	    T[f] = true
 	    TrackCause(f, (lvl or 0) + 1)
 	 end
+
+	 if f == '<GC>' then break end
       end
    end
 end
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 5014417..8fe9910 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -72,11 +72,7 @@
                   },
                 }],
                 ['soname_version!=""', {
-                  # Ideally, we'd like to specify the full filename for the
-                  # library and set it to "libv8.so.<(soname_version)",
-                  # but currently the best we can do is use 'product_name' and
-                  # get "libv8-<(soname_version).so".
-                  'product_name': 'v8-<(soname_version)',
+                  'product_extension': 'so.<(soname_version)',
                 }],
               ],
             },
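
With product_extension, gyp names the shared library libv8.so.<(soname_version) directly, replacing the earlier best-effort spelling libv8-<(soname_version).so via product_name. Reduced to its skeleton (gyp files are Python-literal syntax; this is a fragment, not a complete target):

    {
      'conditions': [
        ['soname_version!=""', {
          # soname_version "3.7.12" yields "libv8.so.3.7.12".
          'product_extension': 'so.<(soname_version)',
        }],
      ],
    }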
@@ -240,7 +236,6 @@
             '../../src/assembler.cc',
             '../../src/assembler.h',
             '../../src/ast.cc',
-            '../../src/ast-inl.h',
             '../../src/ast.h',
             '../../src/atomicops_internals_x86_gcc.cc',
             '../../src/bignum.cc',
@@ -340,6 +335,8 @@
             '../../src/ic-inl.h',
             '../../src/ic.cc',
             '../../src/ic.h',
+            '../../src/incremental-marking.cc',
+            '../../src/incremental-marking.h',
             '../../src/inspector.cc',
             '../../src/inspector.h',
             '../../src/interpreter-irregexp.cc',
@@ -394,6 +391,7 @@
             '../../src/prettyprinter.h',
             '../../src/property.cc',
             '../../src/property.h',
+            '../../src/property-details.h',
             '../../src/profile-generator-inl.h',
             '../../src/profile-generator.cc',
             '../../src/profile-generator.h',
@@ -431,6 +429,9 @@
             '../../src/spaces-inl.h',
             '../../src/spaces.cc',
             '../../src/spaces.h',
+            '../../src/store-buffer-inl.h',
+            '../../src/store-buffer.cc',
+            '../../src/store-buffer.h',
             '../../src/string-search.cc',
             '../../src/string-search.h',
             '../../src/string-stream.cc',
@@ -549,6 +550,40 @@
                 '../../src/ia32/stub-cache-ia32.cc',
               ],
             }],
+            ['v8_target_arch=="mips"', {
+              'sources': [
+                '../../src/mips/assembler-mips.cc',
+                '../../src/mips/assembler-mips.h',
+                '../../src/mips/assembler-mips-inl.h',
+                '../../src/mips/builtins-mips.cc',
+                '../../src/mips/codegen-mips.cc',
+                '../../src/mips/codegen-mips.h',
+                '../../src/mips/code-stubs-mips.cc',
+                '../../src/mips/code-stubs-mips.h',
+                '../../src/mips/constants-mips.cc',
+                '../../src/mips/constants-mips.h',
+                '../../src/mips/cpu-mips.cc',
+                '../../src/mips/debug-mips.cc',
+                '../../src/mips/deoptimizer-mips.cc',
+                '../../src/mips/disasm-mips.cc',
+                '../../src/mips/frames-mips.cc',
+                '../../src/mips/frames-mips.h',
+                '../../src/mips/full-codegen-mips.cc',
+                '../../src/mips/ic-mips.cc',
+                '../../src/mips/lithium-codegen-mips.cc',
+                '../../src/mips/lithium-codegen-mips.h',
+                '../../src/mips/lithium-gap-resolver-mips.cc',
+                '../../src/mips/lithium-gap-resolver-mips.h',
+                '../../src/mips/lithium-mips.cc',
+                '../../src/mips/lithium-mips.h',
+                '../../src/mips/macro-assembler-mips.cc',
+                '../../src/mips/macro-assembler-mips.h',
+                '../../src/mips/regexp-macro-assembler-mips.cc',
+                '../../src/mips/regexp-macro-assembler-mips.h',
+                '../../src/mips/simulator-mips.cc',
+                '../../src/mips/stub-cache-mips.cc',
+              ],
+            }],
             ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
               'sources': [
                 '../../src/x64/assembler-x64-inl.h',
@@ -586,7 +621,8 @@
                     ['v8_compress_startup_data=="bz2"', {
                       'libraries': [
                         '-lbz2',
-                    ]}],
+                      ]
+                    }],
                   ],
                 },
                 'sources': [
@@ -596,26 +632,30 @@
               }
             ],
             ['OS=="android"', {
+                'defines': [
+                  'CAN_USE_VFP_INSTRUCTIONS',
+                ],
                 'sources': [
                   '../../src/platform-posix.cc',
                 ],
                 'conditions': [
-                  ['host_os=="mac" and _toolset!="target"', {
-                    'sources': [
-                      '../../src/platform-macos.cc'
-                    ]
+                  ['host_os=="mac"', {
+                    'target_conditions': [
+                      ['_toolset=="host"', {
+                        'sources': [
+                          '../../src/platform-macos.cc'
+                        ]
+                      }, {
+                        'sources': [
+                          '../../src/platform-linux.cc'
+                        ]
+                      }],
+                    ],
                   }, {
                     'sources': [
                       '../../src/platform-linux.cc'
                     ]
                   }],
-                  ['_toolset=="target"', {
-                    'link_settings': {
-                      'libraries': [
-                        '-llog',
-                       ],
-                     }
-                  }],
                 ],
               },
             ],
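
The Android block now picks the platform file per toolset rather than per build: on a Mac host, the host toolset compiles platform-macos.cc while the target toolset (the Android binary) compiles platform-linux.cc. target_conditions is what defers evaluation until toolsets are expanded; the structure, stripped to its skeleton:

    # gyp's three-element condition form: [cond, then-dict, else-dict].
    ['host_os=="mac"', {
      'target_conditions': [
        ['_toolset=="host"',
         {'sources': ['../../src/platform-macos.cc']},
         {'sources': ['../../src/platform-linux.cc']}],
      ],
    }]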
@@ -641,6 +681,13 @@
                 ],
               }
             ],
+            ['OS=="solaris"', {
+                'sources': [
+                  '../../src/platform-solaris.cc',
+                  '../../src/platform-posix.cc',
+                ],
+              }
+            ],
             ['OS=="mac"', {
               'sources': [
                 '../../src/platform-macos.cc',
@@ -697,7 +744,7 @@
             'experimental_library_files': [
               '../../src/macros.py',
               '../../src/proxy.js',
-              '../../src/weakmap.js',
+              '../../src/collection.js',
             ],
           },
           'actions': [
@@ -743,6 +790,7 @@
           'target_name': 'mksnapshot',
           'type': 'executable',
           'dependencies': [
+            'v8_base',
             'v8_nosnapshot',
           ],
           'include_dirs+': [
@@ -760,8 +808,8 @@
             ['v8_compress_startup_data=="bz2"', {
               'libraries': [
                 '-lbz2',
-              ]}
-            ],
+              ]
+            }],
           ],
         },
         {
@@ -786,7 +834,8 @@
             ['v8_compress_startup_data=="bz2"', {
               'libraries': [
                 '-lbz2',
-              ]}],
+              ]
+            }],
           ],
         },
         {
@@ -858,7 +907,7 @@
       'targets': [
         {
           'target_name': 'v8',
-          'type': 'settings',
+          'type': 'none',
           'conditions': [
             ['want_separate_host_toolset==1', {
               'toolsets': ['host', 'target'],
diff --git a/tools/linux-tick-processor b/tools/linux-tick-processor
index 0b0a1fb..7070ce6 100755
--- a/tools/linux-tick-processor
+++ b/tools/linux-tick-processor
@@ -1,20 +1,5 @@
 #!/bin/sh
 
-tools_path=`cd $(dirname "$0");pwd`
-if [ ! "$D8_PATH" ]; then
-  d8_public=`which d8`
-  if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
-fi
-[ "$D8_PATH" ] || D8_PATH=$tools_path/..
-d8_exec=$D8_PATH/d8
-
-if [ ! -x $d8_exec ]; then
-  echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'scons <flags> d8' from the V8 directory"
-  exit 1
-fi
-
-
 # find the name of the log file to process, it must not start with a dash.
 log_file="v8.log"
 for arg in "$@"
@@ -24,6 +9,28 @@
   fi
 done
 
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x $d8_exec ]; then
+  D8_PATH=`pwd`/out/native
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x $d8_exec ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x $d8_exec ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "To build, execute 'make native' from the V8 directory"
+  exit 1
+fi
 
 # nm spits out 'no symbols found' messages to stderr.
 cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
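
The tick processor now tries three locations for d8 before giving up: $D8_PATH (or the directory of a d8 found on $PATH), then out/native under the current directory, and finally the binary path recorded in the log itself (the first quoted ".../d8" string). The same lookup order, sketched in Python for clarity (find_d8 is an illustrative name):

    import os, re

    def find_d8(d8_path, log_file):
        for c in [os.path.join(d8_path, 'd8'),
                  os.path.join(os.getcwd(), 'out', 'native', 'd8')]:
            if os.access(c, os.X_OK):
                return c
        # Last resort, mirroring: grep -m 1 -o '".*/d8"' $log_file
        with open(log_file) as f:
            m = re.search(r'"([^"]*/d8)"', f.read())
        if m and os.access(m.group(1), os.X_OK):
            return m.group(1)
        return None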
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index 58cbb95..5c07d91 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -334,6 +334,7 @@
   _ARCH_TO_POINTER_TYPE_MAP = {
     "ia32": ctypes.c_uint32,
     "arm": ctypes.c_uint32,
+    "mips": ctypes.c_uint32,
     "x64": ctypes.c_uint64
   }
 
@@ -399,12 +400,16 @@
         code = Code(name, start_address, end_address, origin, origin_offset)
         conficting_code = self.code_map.Find(start_address)
         if conficting_code:
-          LogReader._HandleCodeConflict(conficting_code, code)
-          # TODO(vitalyr): this warning is too noisy because of our
-          # attempts to reconstruct code log from the snapshot.
-          # print >>sys.stderr, \
-          #     "Warning: Skipping duplicate code log entry %s" % code
-          continue
+          if not (conficting_code.start_address == code.start_address and
+            conficting_code.end_address == code.end_address):
+            self.code_map.Remove(conficting_code)
+          else:
+            LogReader._HandleCodeConflict(conficting_code, code)
+            # TODO(vitalyr): this warning is too noisy because of our
+            # attempts to reconstruct code log from the snapshot.
+            # print >>sys.stderr, \
+            #     "Warning: Skipping duplicate code log entry %s" % code
+            continue
         self.code_map.Add(code)
         continue
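
Duplicate handling in ll_prof is now less strict: when a new code entry overlaps an old one at different boundaries (typical after code moved and its address range was reused), the stale entry is evicted and the new one inserted; only an exact duplicate (same start and end address) is still skipped as before. The policy restated (add_code is an illustrative wrapper around the code_map calls above):

    def add_code(code_map, code, conflicting):
        if conflicting is not None:
            if (conflicting.start_address, conflicting.end_address) != \
               (code.start_address, code.end_address):
                code_map.Remove(conflicting)  # boundaries moved: stale
            else:
                return                        # exact duplicate: skip
        code_map.Add(code)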
 
diff --git a/tools/logreader.js b/tools/logreader.js
index 315e721..a8141da 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,9 +134,8 @@
 LogReader.prototype.dispatchLogRow_ = function(fields) {
   // Obtain the dispatch.
   var command = fields[0];
-  if (!(command in this.dispatchTable_)) {
-    throw new Error('unknown command: ' + command);
-  }
+  if (!(command in this.dispatchTable_)) return;
+
   var dispatch = this.dispatchTable_[command];
 
   if (dispatch === null || this.skipDispatch(dispatch)) {
diff --git a/tools/presubmit.py b/tools/presubmit.py
index fda7ba9..7af6e3d 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -211,7 +211,12 @@
     if exists(local_cpplint):
       command = ['python', local_cpplint, '--filter', filt] + join(files)
 
-    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    try:
+      process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    except:
+      print('Error running cpplint.py. Please make sure you have depot_tools' +
+            ' in your $PATH. Lint check skipped.')
+      return True
     LINT_ERROR_PATTERN = re.compile(r'^(.+)[:(]\d+[:)]')
     while True:
       out_line = process.stderr.readline()
diff --git a/tools/push-to-trunk.sh b/tools/push-to-trunk.sh
index 761b733..302c5f2 100755
--- a/tools/push-to-trunk.sh
+++ b/tools/push-to-trunk.sh
@@ -38,6 +38,7 @@
 PATCH_FILE="$PERSISTFILE_BASENAME-patch"
 COMMITMSG_FILE="$PERSISTFILE_BASENAME-commitmsg"
 TOUCHED_FILES_FILE="$PERSISTFILE_BASENAME-touched-files"
+TRUNK_REVISION_FILE="$PERSISTFILE_BASENAME-trunkrevision"
 STEP=0
 
 
@@ -202,10 +203,14 @@
   for commit in $COMMITS ; do
     # Get the commit's title line.
     git log -1 $commit --format="%w(80,8,8)%s" >> "$CHANGELOG_ENTRY_FILE"
-    # Grep for "BUG=xxxx" lines in the commit message.
-    git log -1 $commit --format="%b" | grep BUG= | grep -v "BUG=$" \
-                                     | sed -e 's/^/        /' \
-                                     >> "$CHANGELOG_ENTRY_FILE"
+    # Grep for "BUG=xxxx" lines in the commit message and convert them to
+    # "(issue xxxx)".
+    git log -1 $commit --format="%B" \
+        | grep "^BUG=" | grep -v "BUG=$" \
+        | sed -e 's/^/        /' \
+        | sed -e 's/BUG=v8:\(.*\)$/(issue \1)/' \
+        | sed -e 's/BUG=\(.*\)$/(Chromium issue \1)/' \
+        >> "$CHANGELOG_ENTRY_FILE"
     # Append the commit's author for reference.
     git log -1 $commit --format="%w(80,8,8)(%an)" >> "$CHANGELOG_ENTRY_FILE"
     echo "" >> "$CHANGELOG_ENTRY_FILE"
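
The sed pipeline above turns commit-message "BUG=xxxx" lines into the ChangeLog's issue notation: "BUG=v8:NNNN" becomes "(issue NNNN)" and a bare "BUG=NNNN" becomes "(Chromium issue NNNN)", each indented eight spaces, while empty "BUG=" markers are dropped. The same transformation in Python, for reference (changelog_bug_lines is a hypothetical name):

    def changelog_bug_lines(commit_body):
        out = []
        for line in commit_body.splitlines():
            if not line.startswith("BUG=") or line == "BUG=":
                continue
            ref = line[4:]
            if ref.startswith("v8:"):
                out.append("        (issue %s)" % ref[3:])
            else:
                out.append("        (Chromium issue %s)" % ref)
        return out

    print(changelog_bug_lines("Fix foo.\n\nBUG=v8:1792\nBUG=100414\nBUG="))
    # ['        (issue 1792)', '        (Chromium issue 100414)']
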
@@ -221,7 +226,13 @@
   $EDITOR "$CHANGELOG_ENTRY_FILE"
   NEWCHANGELOG=$(mktemp)
   # Eliminate any trailing newlines by going through a shell variable.
-  CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE")
+  # Also (1) eliminate tabs, (2) fix too little and (3) too much indentation,
+  # and (4) eliminate trailing whitespace.
+  CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE" \
+                   | sed -e 's/\t/        /g' \
+                   | sed -e 's/^ \{1,7\}\([^ ]\)/        \1/g' \
+                   | sed -e 's/^ \{9,80\}\([^ ]\)/        \1/g' \
+                   | sed -e 's/ \+$//')
   [[ -n "$CHANGELOGENTRY" ]] || die "Empty ChangeLog entry."
   echo "$CHANGELOGENTRY" > "$NEWCHANGELOG"
   echo "" >> "$NEWCHANGELOG" # Explicitly insert two empty lines.
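
The cleanup applied to the edited ChangeLog entry does exactly the four things the comment lists: tabs become eight spaces, under-indented lines are pushed out to eight spaces, over-indented lines are pulled back to eight, and trailing whitespace goes away. An equivalent pass in Python (a sketch; it normalizes any indent to eight spaces, whereas the sed pair stops at 80):

    import re

    def normalize_entry(text):
        lines = []
        for line in text.splitlines():
            line = line.replace("\t", "        ")          # tabs -> 8 spaces
            line = re.sub(r"^ +(?=\S)", "        ", line)  # indent -> 8
            lines.append(line.rstrip())                    # trailing spaces
        return "\n".join(lines).rstrip("\n")               # trailing newlines

    print(normalize_entry("   Fixed a bug.   \n            (issue 1808)\t"))
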
@@ -256,8 +267,10 @@
   restore_if_unset "NEWMAJOR"
   restore_if_unset "NEWMINOR"
   restore_if_unset "NEWBUILD"
-  git commit -a -m "Prepare push to trunk.  \
-Now working on version $NEWMAJOR.$NEWMINOR.$NEWBUILD." \
+  PREPARE_COMMIT_MSG="Prepare push to trunk.  \
+Now working on version $NEWMAJOR.$NEWMINOR.$NEWBUILD."
+  persist "PREPARE_COMMIT_MSG"
+  git commit -a -m "$PREPARE_COMMIT_MSG" \
     || die "'git commit -a' failed."
 fi
 
@@ -272,7 +285,8 @@
 if [ $STEP -le 9 ] ; then
   echo ">>> Step 9: Commit to the repository."
   echo "Please wait for an LGTM, then type \"LGTM<Return>\" to commit your \
-change. (If you need to iterate on the patch, do so in another shell.)"
+change. (If you need to iterate on the patch, do so in another shell. Do not \
+modify the existing local commit's commit message.)"
   unset ANSWER
   while [ "$ANSWER" != "LGTM" ] ; do
     [[ -n "$ANSWER" ]] && echo "That was not 'LGTM'."
@@ -294,15 +308,21 @@
 fi
 
 if [ $STEP -le 10 ] ; then
-  echo ">>> Step 10: NOP"
-  # Present in the manual guide, not necessary (even harmful!) for this script.
+  echo ">>> Step 10: Fetch straggler commits that sneaked in between \
+steps 1 and 9."
+  git svn fetch || die "'git svn fetch' failed."
+  git checkout svn/bleeding_edge
+  restore_if_unset "PREPARE_COMMIT_MSG"
+  PREPARE_COMMIT_HASH=$(git log -1 --format=%H --grep="$PREPARE_COMMIT_MSG")
+  persist "PREPARE_COMMIT_HASH"
 fi
 
 if [ $STEP -le 11 ] ; then
   echo ">>> Step 11: Squash commits into one."
   # Instead of relying on "git rebase -i", we'll just create a diff, because
   # that's easier to automate.
-  git diff svn/trunk > "$PATCH_FILE"
+  restore_if_unset "PREPARE_COMMIT_HASH"
+  git diff svn/trunk $PREPARE_COMMIT_HASH > "$PATCH_FILE"
   # Convert the ChangeLog entry to commit message format:
   # - remove date
   # - remove indentation
@@ -397,7 +417,10 @@
 
 if [ $STEP -le 17 ] ; then
   echo ">>> Step 17. Commit to SVN."
-  git svn dcommit || die "'git svn dcommit' failed."
+  git svn dcommit | tee >(grep -E "^Committed r[0-9]+" \
+                          | sed -e 's/^Committed r\([0-9]\+\)/\1/' \
+                          > "$TRUNK_REVISION_FILE") \
+    || die "'git svn dcommit' failed."
 fi
 
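Step 17 now tees the dcommit output through a grep/sed pair so that the "Committed rNNNN" line leaves the fresh trunk revision number behind in $TRUNK_REVISION_FILE for the summary in the final step. One caveat of the shell form: without set -o pipefail, the "|| die" tests the pipeline's last command rather than dcommit itself. The extraction boils down to a regex (extract_trunk_revision is a hypothetical name):

    import re

    def extract_trunk_revision(dcommit_output):
        # Pull NNNN out of a "Committed rNNNN" line, as the grep/sed does.
        for line in dcommit_output.splitlines():
            match = re.match(r"Committed r(\d+)", line)
            if match:
                return match.group(1)
        return None

    print(extract_trunk_revision("Committed r10142"))  # -> 10142
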
 if [ $STEP -le 18 ] ; then
@@ -424,8 +447,10 @@
   restore_if_unset "MINOR"
   restore_if_unset "BUILD"
   echo "Congratulations, you have successfully created the trunk revision \
-$MAJOR.$MINOR.$BUILD. Please don't forget to update the v8rel spreadsheet, \
-and to roll this new version into Chromium."
+$MAJOR.$MINOR.$BUILD. Please don't forget to roll this new version into \
+Chromium, and to update the v8rel spreadsheet:"
+  TRUNK_REVISION=$(cat "$TRUNK_REVISION_FILE")
+  echo -e "$MAJOR.$MINOR.$BUILD\ttrunk\t$TRUNK_REVISION"
   # Clean up all temporary files.
   rm -f "$PERSISTFILE_BASENAME"*
 fi
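
Two related changes thread through this script. Step 10, formerly a deliberate no-op, now fetches any straggler commits that landed on bleeding_edge between steps 1 and 9 and records the hash of the "Prepare push to trunk" commit, located by grepping the log for the persisted commit message; step 11 then diffs trunk against that pinned hash instead of against the branch head, so stragglers cannot leak into the squashed trunk patch. The lookup amounts to this (a sketch; note that git log treats the --grep argument as a regex, while the script passes the message verbatim):

    import subprocess

    def find_prepare_commit(message):
        # Mirror "git log -1 --format=%H --grep=<message>": newest commit
        # whose message matches, by hash.
        out = subprocess.check_output(
            ["git", "log", "-1", "--format=%H", "--grep=" + message])
        return out.strip()
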
diff --git a/tools/test-wrapper-gypbuild.py b/tools/test-wrapper-gypbuild.py
index ad5449a..a990b7e 100755
--- a/tools/test-wrapper-gypbuild.py
+++ b/tools/test-wrapper-gypbuild.py
@@ -131,16 +131,20 @@
 
 
 def ProcessOptions(options):
-  if options.arch_and_mode != None and options.arch_and_mode != "":
-    tokens = options.arch_and_mode.split(".")
-    options.arch = tokens[0]
-    options.mode = tokens[1]
-  options.mode = options.mode.split(',')
+  if options.arch_and_mode == ".":
+    options.arch = []
+    options.mode = []
+  else:
+    if options.arch_and_mode != None and options.arch_and_mode != "":
+      tokens = options.arch_and_mode.split(".")
+      options.arch = tokens[0]
+      options.mode = tokens[1]
+    options.mode = options.mode.split(',')
+    options.arch = options.arch.split(',')
   for mode in options.mode:
     if not mode in ['debug', 'release']:
       print "Unknown mode %s" % mode
       return False
-  options.arch = options.arch.split(',')
   for arch in options.arch:
     if not arch in ['ia32', 'x64', 'arm']:
       print "Unknown architecture %s" % arch
@@ -165,7 +169,7 @@
   if options.snapshot:
     result += ['--snapshot']
   if options.special_command:
-    result += ['--special-command=' + options.special_command]
+    result += ['--special-command="%s"' % options.special_command]
   if options.valgrind:
     result += ['--valgrind']
   if options.cat:
@@ -232,6 +236,18 @@
                                env=env)
       returncodes += child.wait()
 
+  if len(options.mode) == 0 and len(options.arch) == 0:
+    print ">>> running tests"
+    shellpath = workspace + '/' + options.outdir
+    env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
+    shell = shellpath + '/d8'
+    child = subprocess.Popen(' '.join(args_for_children +
+                                      ['--shell=' + shell]),
+                             shell=True,
+                             cwd=workspace,
+                             env=env)
+    returncodes = child.wait()
+
   return returncodes
 
 
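The test-wrapper changes add a special case to ProcessOptions: "--arch-and-mode=." empties both lists, which later routes execution past the per-architecture/per-mode loops and runs the suite once against a single prebuilt d8 found under --outdir. The parsing logic, condensed (the defaults here are illustrative, not the script's):

    def split_arch_and_mode(arch_and_mode, arch="ia32", mode="release"):
        # "." means: no arch/mode product, test one prebuilt shell instead.
        if arch_and_mode == ".":
            return [], []
        if arch_and_mode:
            arch, mode = arch_and_mode.split(".")
        return arch.split(","), mode.split(",")

    assert split_arch_and_mode("x64.debug") == (["x64"], ["debug"])
    assert split_arch_and_mode(".") == ([], [])
    assert split_arch_and_mode(None) == (["ia32"], ["release"])
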
diff --git a/tools/utils.py b/tools/utils.py
index fb94d14..232314c 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -61,6 +61,8 @@
     return 'openbsd'
   elif id == 'SunOS':
     return 'solaris'
+  elif id == 'NetBSD':
+    return 'netbsd'
   else:
     return None
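
With the new branch, the OS guesser recognizes NetBSD alongside the other BSDs. The function is essentially a normalization of platform.system() output; condensed (the real version also handles Cygwin and Windows spellings, omitted here):

    import platform

    _OS_NAMES = {
        "Linux": "linux", "Darwin": "macos", "FreeBSD": "freebsd",
        "OpenBSD": "openbsd", "SunOS": "solaris", "NetBSD": "netbsd",
    }

    def guess_os():
        return _OS_NAMES.get(platform.system())  # None if unrecognized

    print(guess_os())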